Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig6
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_extlog.c18
-rw-r--r--drivers/acpi/acpi_lpss.c10
-rw-r--r--drivers/acpi/acpi_pnp.c8
-rw-r--r--drivers/acpi/acpi_processor.c2
-rw-r--r--drivers/acpi/acpica/Makefile10
-rw-r--r--drivers/acpi/acpica/acapps.h9
-rw-r--r--drivers/acpi/acpica/acdebug.h3
-rw-r--r--drivers/acpi/acpica/acglobal.h8
-rw-r--r--drivers/acpi/acpica/aclocal.h18
-rw-r--r--drivers/acpi/acpica/acpredef.h16
-rw-r--r--drivers/acpi/acpica/acutils.h48
-rw-r--r--drivers/acpi/acpica/evgpe.c32
-rw-r--r--drivers/acpi/acpica/evxfgpe.c61
-rw-r--r--drivers/acpi/acpica/exdebug.c11
-rw-r--r--drivers/acpi/acpica/exdump.c6
-rw-r--r--drivers/acpi/acpica/exfield.c26
-rw-r--r--drivers/acpi/acpica/hwregs.c3
-rw-r--r--drivers/acpi/acpica/nsobject.c10
-rw-r--r--drivers/acpi/acpica/utbuffer.c128
-rw-r--r--drivers/acpi/acpica/utcopy.c6
-rw-r--r--drivers/acpi/acpica/utdebug.c26
-rw-r--r--drivers/acpi/acpica/utdecode.c32
-rw-r--r--drivers/acpi/acpica/utfileio.c331
-rw-r--r--drivers/acpi/acpica/utglobal.c146
-rw-r--r--drivers/acpi/acpica/uthex.c100
-rw-r--r--drivers/acpi/acpica/utinit.c145
-rw-r--r--drivers/acpi/acpica/utprint.c664
-rw-r--r--drivers/acpi/acpica/utuuid.c96
-rw-r--r--drivers/acpi/apei/apei-internal.h10
-rw-r--r--drivers/acpi/apei/ghes.c32
-rw-r--r--drivers/acpi/battery.c1
-rw-r--r--drivers/acpi/blacklist.c68
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/button.c25
-rw-r--r--drivers/acpi/device_pm.c148
-rw-r--r--drivers/acpi/internal.h7
-rw-r--r--drivers/acpi/osl.c4
-rw-r--r--drivers/acpi/pci_irq.c7
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/processor_core.c200
-rw-r--r--drivers/acpi/processor_pdc.c206
-rw-r--r--drivers/acpi/scan.c33
-rw-r--r--drivers/acpi/sleep.c5
-rw-r--r--drivers/acpi/video.c121
-rw-r--r--drivers/amba/tegra-ahb.c3
-rw-r--r--drivers/ata/Kconfig1
-rw-r--r--drivers/ata/libata-core.c72
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/ata/libata.h2
-rw-r--r--drivers/atm/he.c31
-rw-r--r--drivers/atm/idt77252.c15
-rw-r--r--drivers/base/Kconfig10
-rw-r--r--drivers/base/dma-contiguous.c220
-rw-r--r--drivers/base/memory.c30
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/base/power/main.c5
-rw-r--r--drivers/block/DAC960.c18
-rw-r--r--drivers/block/cciss.c11
-rw-r--r--drivers/block/rbd.c689
-rw-r--r--drivers/block/skd_main.c25
-rw-r--r--drivers/block/zram/zram_drv.c71
-rw-r--r--drivers/block/zram/zram_drv.h29
-rw-r--r--drivers/bus/Kconfig8
-rw-r--r--drivers/bus/Makefile4
-rw-r--r--drivers/bus/arm-cci.c3
-rw-r--r--drivers/bus/arm-ccn.c1391
-rw-r--r--drivers/bus/imx-weim.c4
-rw-r--r--drivers/char/agp/frontend.c15
-rw-r--r--drivers/char/hw_random/core.c6
-rw-r--r--drivers/char/hw_random/virtio-rng.c39
-rw-r--r--drivers/char/virtio_console.c6
-rw-r--r--drivers/clk/mvebu/clk-cpu.c80
-rw-r--r--drivers/clk/samsung/Makefile1
-rw-r--r--drivers/clk/samsung/clk-s5pv210-audss.c241
-rw-r--r--drivers/clk/samsung/clk-s5pv210.c856
-rw-r--r--drivers/clk/tegra/clk-periph-gate.c3
-rw-r--r--drivers/clk/tegra/clk-tegra30.c5
-rw-r--r--drivers/clk/tegra/clk.c3
-rw-r--r--drivers/clk/versatile/Makefile3
-rw-r--r--drivers/clk/versatile/clk-versatile.c (renamed from drivers/clk/versatile/clk-integrator.c)38
-rw-r--r--drivers/clocksource/tegra20_timer.c13
-rw-r--r--drivers/cpufreq/cpufreq.c74
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c11
-rw-r--r--drivers/cpufreq/freq_table.c12
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c35
-rw-r--r--drivers/cpufreq/intel_pstate.c131
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c6
-rw-r--r--drivers/cpufreq/powernow-k6.c1
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c18
-rw-r--r--drivers/cpufreq/s3c2410-cpufreq.c2
-rw-r--r--drivers/cpufreq/s3c2412-cpufreq.c3
-rw-r--r--drivers/cpufreq/s3c2440-cpufreq.c3
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c1
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c131
-rw-r--r--drivers/cpuidle/Kconfig7
-rw-r--r--drivers/cpuidle/Kconfig.arm15
-rw-r--r--drivers/cpuidle/Makefile2
-rw-r--r--drivers/cpuidle/cpuidle-armada-370-xp.c93
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c12
-rw-r--r--drivers/cpuidle/cpuidle-exynos.c25
-rw-r--r--drivers/cpuidle/cpuidle-mvebu-v7.c150
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c16
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/cpuidle/driver.c11
-rw-r--r--drivers/cpuidle/governors/ladder.c4
-rw-r--r--drivers/cpuidle/governors/menu.c3
-rw-r--r--drivers/cpuidle/sysfs.c2
-rw-r--r--drivers/crypto/hifn_795x.c5
-rw-r--r--drivers/devfreq/Kconfig1
-rw-r--r--drivers/dma/Kconfig17
-rw-r--r--drivers/dma/Makefile6
-rw-r--r--drivers/dma/TODO1
-rw-r--r--drivers/dma/amba-pl08x.c6
-rw-r--r--drivers/dma/at_hdmac.c15
-rw-r--r--drivers/dma/bcm2835-dma.c2
-rw-r--r--drivers/dma/dma-jz4740.c4
-rw-r--r--drivers/dma/dw/core.c42
-rw-r--r--drivers/dma/edma.c24
-rw-r--r--drivers/dma/ep93xx_dma.c4
-rw-r--r--drivers/dma/fsl-edma.c8
-rw-r--r--drivers/dma/fsldma.c297
-rw-r--r--drivers/dma/fsldma.h32
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/imx-sdma.c17
-rw-r--r--drivers/dma/ipu/ipu_idmac.c14
-rw-r--r--drivers/dma/mmp_pdma.c2
-rw-r--r--drivers/dma/mmp_tdma.c2
-rw-r--r--drivers/dma/mpc512x_dma.c13
-rw-r--r--drivers/dma/mxs-dma.c10
-rw-r--r--drivers/dma/nbpfaxi.c1517
-rw-r--r--drivers/dma/of-dma.c35
-rw-r--r--drivers/dma/omap-dma.c3
-rw-r--r--drivers/dma/pl330.c964
-rw-r--r--drivers/dma/qcom_bam_dma.c20
-rw-r--r--drivers/dma/s3c24xx-dma.c3
-rw-r--r--drivers/dma/sa11x0-dma.c2
-rw-r--r--drivers/dma/sh/Kconfig24
-rw-r--r--drivers/dma/sh/Makefile16
-rw-r--r--drivers/dma/sh/rcar-audmapp.c114
-rw-r--r--drivers/dma/sh/shdma-arm.h4
-rw-r--r--drivers/dma/sh/shdma-base.c103
-rw-r--r--drivers/dma/sh/shdma.h2
-rw-r--r--drivers/dma/sh/shdmac.c15
-rw-r--r--drivers/dma/sirf-dma.c2
-rw-r--r--drivers/dma/ste_dma40.c3
-rw-r--r--drivers/dma/sun6i-dma.c1053
-rw-r--r--drivers/dma/tegra20-apb-dma.c2
-rw-r--r--drivers/firmware/efi/cper.c20
-rw-r--r--drivers/firmware/efi/runtime-map.c21
-rw-r--r--drivers/firmware/memmap.c6
-rw-r--r--drivers/gpio/Kconfig23
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/devres.c40
-rw-r--r--drivers/gpio/gpio-74x164.c8
-rw-r--r--drivers/gpio/gpio-adnp.c9
-rw-r--r--drivers/gpio/gpio-adp5520.c8
-rw-r--r--drivers/gpio/gpio-adp5588.c6
-rw-r--r--drivers/gpio/gpio-amd8111.c3
-rw-r--r--drivers/gpio/gpio-arizona.c3
-rw-r--r--drivers/gpio/gpio-crystalcove.c380
-rw-r--r--drivers/gpio/gpio-cs5535.c8
-rw-r--r--drivers/gpio/gpio-da9052.c3
-rw-r--r--drivers/gpio/gpio-da9055.c3
-rw-r--r--drivers/gpio/gpio-dwapb.c2
-rw-r--r--drivers/gpio/gpio-em.c5
-rw-r--r--drivers/gpio/gpio-f7188x.c18
-rw-r--r--drivers/gpio/gpio-generic.c3
-rw-r--r--drivers/gpio/gpio-grgpio.c4
-rw-r--r--drivers/gpio/gpio-ich.c9
-rw-r--r--drivers/gpio/gpio-intel-mid.c86
-rw-r--r--drivers/gpio/gpio-it8761e.c6
-rw-r--r--drivers/gpio/gpio-janz-ttl.c8
-rw-r--r--drivers/gpio/gpio-kempld.c3
-rw-r--r--drivers/gpio/gpio-lp3943.c3
-rw-r--r--drivers/gpio/gpio-lpc32xx.c2
-rw-r--r--drivers/gpio/gpio-lynxpoint.c98
-rw-r--r--drivers/gpio/gpio-max730x.c13
-rw-r--r--drivers/gpio/gpio-max732x.c7
-rw-r--r--drivers/gpio/gpio-mc33880.c11
-rw-r--r--drivers/gpio/gpio-mc9s08dz60.c3
-rw-r--r--drivers/gpio/gpio-mcp23s08.c26
-rw-r--r--drivers/gpio/gpio-ml-ioh.c8
-rw-r--r--drivers/gpio/gpio-msm-v2.c5
-rw-r--r--drivers/gpio/gpio-mxc.c2
-rw-r--r--drivers/gpio/gpio-octeon.c3
-rw-r--r--drivers/gpio/gpio-omap.c275
-rw-r--r--drivers/gpio/gpio-palmas.c3
-rw-r--r--drivers/gpio/gpio-pca953x.c7
-rw-r--r--drivers/gpio/gpio-pcf857x.c4
-rw-r--r--drivers/gpio/gpio-pch.c10
-rw-r--r--drivers/gpio/gpio-pxa.c7
-rw-r--r--drivers/gpio/gpio-rc5t583.c3
-rw-r--r--drivers/gpio/gpio-rcar.c9
-rw-r--r--drivers/gpio/gpio-rdc321x.c7
-rw-r--r--drivers/gpio/gpio-samsung.c965
-rw-r--r--drivers/gpio/gpio-sch.c16
-rw-r--r--drivers/gpio/gpio-sch311x.c6
-rw-r--r--drivers/gpio/gpio-sodaville.c4
-rw-r--r--drivers/gpio/gpio-stmpe.c119
-rw-r--r--drivers/gpio/gpio-sx150x.c7
-rw-r--r--drivers/gpio/gpio-syscon.c3
-rw-r--r--drivers/gpio/gpio-tb10x.c5
-rw-r--r--drivers/gpio/gpio-tc3589x.c8
-rw-r--r--drivers/gpio/gpio-timberdale.c5
-rw-r--r--drivers/gpio/gpio-tps6586x.c3
-rw-r--r--drivers/gpio/gpio-tps65910.c3
-rw-r--r--drivers/gpio/gpio-tps65912.c3
-rw-r--r--drivers/gpio/gpio-ts5500.c6
-rw-r--r--drivers/gpio/gpio-twl4030.c6
-rw-r--r--drivers/gpio/gpio-twl6040.c3
-rw-r--r--drivers/gpio/gpio-ucb1400.c4
-rw-r--r--drivers/gpio/gpio-viperboard.c10
-rw-r--r--drivers/gpio/gpio-vr41xx.c8
-rw-r--r--drivers/gpio/gpio-vx855.c3
-rw-r--r--drivers/gpio/gpio-wm831x.c3
-rw-r--r--drivers/gpio/gpio-wm8350.c3
-rw-r--r--drivers/gpio/gpio-wm8994.c3
-rw-r--r--drivers/gpio/gpio-zynq.c692
-rw-r--r--drivers/gpio/gpiolib-acpi.c44
-rw-r--r--drivers/gpio/gpiolib-legacy.c102
-rw-r--r--drivers/gpio/gpiolib-of.c10
-rw-r--r--drivers/gpio/gpiolib-sysfs.c827
-rw-r--r--drivers/gpio/gpiolib.c1273
-rw-r--r--drivers/gpio/gpiolib.h105
-rw-r--r--drivers/gpu/drm/Kconfig3
-rw-r--r--drivers/gpu/drm/Makefile8
-rw-r--r--drivers/gpu/drm/armada/armada_510.c23
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c187
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.h11
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h13
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c245
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c4
-rw-r--r--drivers/gpu/drm/armada/armada_output.c4
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c6
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c16
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_fbdev.c7
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c17
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c2
-rw-r--r--drivers/gpu/drm/bridge/ptn3460.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c14
-rw-r--r--drivers/gpu/drm/drm_buffer.c6
-rw-r--r--drivers/gpu/drm/drm_bufs.c17
-rw-r--r--drivers/gpu/drm/drm_context.c102
-rw-r--r--drivers/gpu/drm/drm_crtc.c604
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_debugfs.c182
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c2715
-rw-r--r--drivers/gpu/drm/drm_drv.c1190
-rw-r--r--drivers/gpu/drm/drm_edid.c11
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c5
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c115
-rw-r--r--drivers/gpu/drm/drm_fops.c85
-rw-r--r--drivers/gpu/drm/drm_gem.c29
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_hashtab.c2
-rw-r--r--drivers/gpu/drm/drm_info.c2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c372
-rw-r--r--drivers/gpu/drm/drm_legacy.h51
-rw-r--r--drivers/gpu/drm/drm_lock.c1
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c14
-rw-r--r--drivers/gpu/drm/drm_of.c67
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c7
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c9
-rw-r--r--drivers/gpu/drm/drm_rect.c140
-rw-r--r--drivers/gpu/drm/drm_stub.c805
-rw-r--r--drivers/gpu/drm/drm_sysfs.c92
-rw-r--r--drivers/gpu/drm/exynos/Kconfig2
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c117
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c21
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c285
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c277
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c259
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c38
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c57
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c4
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c5
-rw-r--r--drivers/gpu/drm/gma500/gtt.c2
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c4
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c4
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c4
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c390
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c5
-rw-r--r--drivers/gpu/drm/i915/Kconfig12
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c3
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c425
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c73
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c70
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h190
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c95
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c149
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c308
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c64
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c161
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c104
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c149
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c122
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c639
-rw-r--r--drivers/gpu/drm/i915/i915_params.c11
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h550
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c57
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c5
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c38
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c474
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1463
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c743
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c548
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h155
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c57
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.c16
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c8
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c81
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c10
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c37
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c102
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c54
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c20
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c3
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c22
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c160
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c938
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate.h2
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen6.c1
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen7.c1
-rw-r--r--drivers/gpu/drm/i915/intel_renderstate_gen8.c1
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c491
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h92
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c10
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c36
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c212
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c16
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h58
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h296
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h5
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h56
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h239
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c69
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h109
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c27
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c377
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c25
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h1
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h431
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c159
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h25
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h4
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c91
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c49
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c12
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c31
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h8
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig12
-rw-r--r--drivers/gpu/drm/nouveau/Makefile25
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c162
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c176
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c115
-rw-r--r--drivers/gpu/drm/nouveau/core/core/ioctl.c531
-rw-r--r--drivers/gpu/drm/nouveau/core/core/notify.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/core/object.c154
-rw-r--r--drivers/gpu/drm/nouveau/core/core/parent.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/acpi.c59
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/acpi.h9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c396
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/ctrl.c153
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/gm100.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv10.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv20.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv30.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c49
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/base.c96
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.c63
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/conn.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c85
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/gm107.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv04.c137
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c477
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h131
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c31
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c380
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c16
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outp.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c100
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c160
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c210
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c170
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c113
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c32
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c30
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c58
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c58
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c121
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c56
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c123
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h65
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c85
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c74
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c117
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/gm107.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv108.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv25.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv30.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv34.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv35.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c265
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve4.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/perfmon/base.c128
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c39
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h470
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/device.h18
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/event.h51
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/handle.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/ioctl.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/notify.h36
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h27
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h11
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/perfmon.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/class.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/event.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h1
l---------drivers/gpu/drm/nouveau/core/include/nvif/unpack.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bar.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h30
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h23
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltc.h35
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h31
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/pwr.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/base.c197
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c665
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c11
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c50
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/base.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c)165
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c58
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c (renamed from drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c)121
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h21
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/priv.h38
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/base.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h658
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h222
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c69
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c43
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h44
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/arb.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c42
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c54
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.c114
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/hw.h84
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c120
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c27
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c246
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_agp.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c57
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c149
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c246
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c80
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c289
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c307
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h70
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c142
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c88
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c58
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_nvif.c136
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c182
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.h49
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.c88
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sysfs.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c384
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c59
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fence.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h3
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c920
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c31
-rw-r--r--drivers/gpu/drm/nouveau/nv84_fence.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvif/class.h558
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.c129
-rw-r--r--drivers/gpu/drm/nouveau/nvif/client.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.c78
-rw-r--r--drivers/gpu/drm/nouveau/nvif/device.h62
-rw-r--r--drivers/gpu/drm/nouveau/nvif/driver.h21
-rw-r--r--drivers/gpu/drm/nouveau/nvif/event.h62
-rw-r--r--drivers/gpu/drm/nouveau/nvif/ioctl.h128
-rw-r--r--drivers/gpu/drm/nouveau/nvif/list.h353
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.c237
-rw-r--r--drivers/gpu/drm/nouveau/nvif/notify.h39
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.c302
-rw-r--r--drivers/gpu/drm/nouveau/nvif/object.h75
l---------drivers/gpu/drm/nouveau/nvif/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/unpack.h24
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c27
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c33
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c21
-rw-r--r--drivers/gpu/drm/panel/Kconfig7
-rw-r--r--drivers/gpu/drm/panel/panel-ld9040.c21
-rw-r--r--drivers/gpu/drm/panel/panel-s6e8aa0.c29
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c203
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h4
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c16
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c13
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c39
-rw-r--r--drivers/gpu/drm/radeon/cik.c722
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c247
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c16
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c18
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c4
-rw-r--r--drivers/gpu/drm/radeon/ni.c1
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c178
-rw-r--r--drivers/gpu/drm/radeon/r100.c62
-rw-r--r--drivers/gpu/drm/radeon/r300.c13
-rw-r--r--drivers/gpu/drm/radeon/r600.c24
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon.h127
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c79
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h77
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c83
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c388
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c87
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c72
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c58
-rw-r--r--drivers/gpu/drm/radeon/radeon_ib.c319
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c63
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c287
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h22
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.c167
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.h71
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c278
-rw-r--r--drivers/gpu/drm/radeon/rs400.c14
-rw-r--r--drivers/gpu/drm/radeon/rs600.c17
-rw-r--r--drivers/gpu/drm/radeon/rv770.c1
-rw-r--r--drivers/gpu/drm/radeon/si.c460
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c172
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c152
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.h5
-rw-r--r--drivers/gpu/drm/radeon/si_smc.c62
-rw-r--r--drivers/gpu/drm/radeon/sislands_smc.h3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c4
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c6
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c2
-rw-r--r--drivers/gpu/drm/sti/Kconfig14
-rw-r--r--drivers/gpu/drm/sti/Makefile21
-rw-r--r--drivers/gpu/drm/sti/NOTES58
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c281
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h90
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.c421
-rw-r--r--drivers/gpu/drm/sti/sti_drm_crtc.h22
-rw-r--r--drivers/gpu/drm/sti/sti_drm_drv.c241
-rw-r--r--drivers/gpu/drm/sti/sti_drm_drv.h29
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.c195
-rw-r--r--drivers/gpu/drm/sti/sti_drm_plane.h18
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c549
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.h16
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c794
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c810
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h88
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c336
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h14
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c211
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h14
-rw-r--r--drivers/gpu/drm/sti/sti_layer.c197
-rw-r--r--drivers/gpu/drm/sti/sti_layer.h123
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c249
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h54
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c648
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c138
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h12
-rw-r--r--drivers/gpu/drm/sti/sti_vtac.c215
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c366
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.h28
-rw-r--r--drivers/gpu/drm/tegra/dc.c123
-rw-r--r--drivers/gpu/drm/tegra/dc.h5
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c1
-rw-r--r--drivers/gpu/drm/tegra/drm.c216
-rw-r--r--drivers/gpu/drm/tegra/drm.h11
-rw-r--r--drivers/gpu/drm/tegra/dsi.c4
-rw-r--r--drivers/gpu/drm/tegra/fb.c66
-rw-r--r--drivers/gpu/drm/tegra/gem.c5
-rw-r--r--drivers/gpu/drm/tegra/gem.h16
-rw-r--r--drivers/gpu/drm/tegra/gr2d.c1
-rw-r--r--drivers/gpu/drm/tegra/gr3d.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c1
-rw-r--r--drivers/gpu/drm/tegra/output.c6
-rw-r--r--drivers/gpu/drm/tegra/sor.c24
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c15
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c41
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.c29
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c37
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c20
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c31
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c36
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c13
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c13
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c5
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c8
-rw-r--r--drivers/gpu/drm/udl/udl_main.c15
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c341
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h74
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c227
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c396
-rw-r--r--drivers/gpu/host1x/job.c22
-rw-r--r--drivers/gpu/vga/vgaarb.c40
-rw-r--r--drivers/hid/Kconfig45
-rw-r--r--drivers/hid/Makefile7
-rw-r--r--drivers/hid/hid-core.c24
-rw-r--r--drivers/hid/hid-cp2112.c111
-rw-r--r--drivers/hid/hid-gt683r.c321
-rw-r--r--drivers/hid/hid-huion.c265
-rw-r--r--drivers/hid/hid-hyperv.c6
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c462
-rw-r--r--drivers/hid/hid-lenovo.c708
-rw-r--r--drivers/hid/hid-picolcd_debugfs.c9
-rw-r--r--drivers/hid/hid-rmi.c67
-rw-r--r--drivers/hid/hid-roccat-lua.c2
-rw-r--r--drivers/hid/hid-sony.c132
-rw-r--r--drivers/hid/hid-wacom.c973
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c15
-rw-r--r--drivers/hid/usbhid/hid-core.c8
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/wacom.h (renamed from drivers/input/tablet/wacom.h)27
-rw-r--r--drivers/hid/wacom_sys.c (renamed from drivers/input/tablet/wacom_sys.c)1229
-rw-r--r--drivers/hid/wacom_wac.c (renamed from drivers/input/tablet/wacom_wac.c)1176
-rw-r--r--drivers/hid/wacom_wac.h (renamed from drivers/input/tablet/wacom_wac.h)22
-rw-r--r--drivers/hsi/clients/ssi_protocol.c2
-rw-r--r--drivers/hsi/controllers/omap_ssi.c14
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c15
-rw-r--r--drivers/hwmon/asus_atk0110.c2
-rw-r--r--drivers/i2c/Kconfig18
-rw-r--r--drivers/i2c/Makefile5
-rw-r--r--drivers/i2c/busses/Kconfig46
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-at91.c6
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c2
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c2
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c17
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c9
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c3
-rw-r--r--drivers/i2c/busses/i2c-efm32.c8
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c16
-rw-r--r--drivers/i2c/busses/i2c-gpio.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c139
-rw-r--r--drivers/i2c/busses/i2c-imx.c5
-rw-r--r--drivers/i2c/busses/i2c-mpc.c3
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c2
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c8
-rw-r--r--drivers/i2c/busses/i2c-ocores.c12
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/i2c/busses/i2c-qup.c12
-rw-r--r--drivers/i2c/busses/i2c-rcar.c14
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c16
-rw-r--r--drivers/i2c/busses/i2c-s6000.c404
-rw-r--r--drivers/i2c/busses/i2c-s6000.h79
-rw-r--r--drivers/i2c/busses/i2c-sirf.c2
-rw-r--r--drivers/i2c/busses/i2c-st.c34
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c13
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c12
-rw-r--r--drivers/i2c/busses/scx200_i2c.c129
-rw-r--r--drivers/i2c/i2c-acpi.c362
-rw-r--r--drivers/i2c/i2c-core.c107
-rw-r--r--drivers/i2c/i2c-stub.c237
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c15
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c12
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c5
-rw-r--r--drivers/input/keyboard/Kconfig10
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/cap1106.c335
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c70
-rw-r--r--drivers/input/keyboard/imx_keypad.c6
-rw-r--r--drivers/input/keyboard/lm8323.c22
-rw-r--r--drivers/input/keyboard/max7359_keypad.c45
-rw-r--r--drivers/input/misc/keyspan_remote.c1
-rw-r--r--drivers/input/misc/soc_button_array.c3
-rw-r--r--drivers/input/misc/uinput.c47
-rw-r--r--drivers/input/mouse/alps.c691
-rw-r--r--drivers/input/mouse/alps.h60
-rw-r--r--drivers/input/serio/hyperv-keyboard.c13
-rw-r--r--drivers/input/tablet/Kconfig18
-rw-r--r--drivers/input/tablet/Makefile4
-rw-r--r--drivers/input/tablet/wacom_serial4.c620
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/ads7846.c6
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c1313
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c3
-rw-r--r--drivers/input/touchscreen/ipaq-micro-ts.c166
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c70
-rw-r--r--drivers/input/touchscreen/mcs5000_ts.c6
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c250
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c4
-rw-r--r--drivers/input/touchscreen/zforce_ts.c31
-rw-r--r--drivers/iommu/dmar.c28
-rw-r--r--drivers/iommu/tegra-smmu.c3
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c18
-rw-r--r--drivers/leds/Kconfig15
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/led-class.c19
-rw-r--r--drivers/leds/led-core.c11
-rw-r--r--drivers/leds/leds-atmel-pwm.c149
-rw-r--r--drivers/leds/leds-ipaq-micro.c141
-rw-r--r--drivers/leds/leds-lm3530.c20
-rw-r--r--drivers/leds/leds-lm3533.c20
-rw-r--r--drivers/leds/leds-lm355x.c21
-rw-r--r--drivers/leds/leds-lm3642.c30
-rw-r--r--drivers/leds/leds-lp55xx-common.c20
-rw-r--r--drivers/leds/leds-max8997.c16
-rw-r--r--drivers/leds/leds-netxbig.c31
-rw-r--r--drivers/leds/leds-ns2.c16
-rw-r--r--drivers/leds/leds-pca963x.c28
-rw-r--r--drivers/leds/leds-ss4200.c14
-rw-r--r--drivers/leds/leds-wm831x-status.c23
-rw-r--r--drivers/lguest/core.c7
-rw-r--r--drivers/macintosh/via-pmu-backlight.c6
-rw-r--r--drivers/mailbox/Kconfig19
-rw-r--r--drivers/mailbox/Makefile6
-rw-r--r--drivers/mailbox/mailbox-omap1.c203
-rw-r--r--drivers/mailbox/mailbox-omap2.c357
-rw-r--r--drivers/mailbox/omap-mailbox.c444
-rw-r--r--drivers/mailbox/omap-mbox.h67
-rw-r--r--drivers/md/md.c13
-rw-r--r--drivers/md/raid0.c6
-rw-r--r--drivers/md/raid1.c8
-rw-r--r--drivers/md/raid10.c11
-rw-r--r--drivers/media/common/saa7146/saa7146_core.c15
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c5
-rw-r--r--drivers/media/pci/bt8xx/bt878.c16
-rw-r--r--drivers/media/pci/ngene/ngene-core.c7
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c11
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c11
-rw-r--r--drivers/memory/Kconfig10
-rw-r--r--drivers/memory/Makefile1
-rw-r--r--drivers/memory/fsl-corenet-cf.c251
-rw-r--r--drivers/message/fusion/mptbase.c23
-rw-r--r--drivers/message/fusion/mptbase.h4
-rw-r--r--drivers/message/fusion/mptctl.c18
-rw-r--r--drivers/message/fusion/mptfc.c9
-rw-r--r--drivers/message/fusion/mptsas.c76
-rw-r--r--drivers/message/fusion/mptsas.h2
-rw-r--r--drivers/message/fusion/mptscsih.c31
-rw-r--r--drivers/message/fusion/mptscsih.h4
-rw-r--r--drivers/message/fusion/mptspi.c5
-rw-r--r--drivers/message/i2o/i2o_scsi.c11
-rw-r--r--drivers/mfd/88pm805.c2
-rw-r--r--drivers/mfd/88pm860x-core.c37
-rw-r--r--drivers/mfd/88pm860x-i2c.c3
-rw-r--r--drivers/mfd/Kconfig25
-rw-r--r--drivers/mfd/Makefile5
-rw-r--r--drivers/mfd/aat2870-core.c5
-rw-r--r--drivers/mfd/ab3100-core.c54
-rw-r--r--drivers/mfd/ab8500-core.c49
-rw-r--r--drivers/mfd/ab8500-debugfs.c308
-rw-r--r--drivers/mfd/arizona-core.c53
-rw-r--r--drivers/mfd/arizona-i2c.c5
-rw-r--r--drivers/mfd/arizona-irq.c29
-rw-r--r--drivers/mfd/arizona-spi.c3
-rw-r--r--drivers/mfd/arizona.h5
-rw-r--r--drivers/mfd/asic3.c12
-rw-r--r--drivers/mfd/cros_ec.c97
-rw-r--r--drivers/mfd/cros_ec_i2c.c44
-rw-r--r--drivers/mfd/cros_ec_spi.c56
-rw-r--r--drivers/mfd/da9063-core.c6
-rw-r--r--drivers/mfd/da9063-i2c.c134
-rw-r--r--drivers/mfd/dm355evm_msp.c2
-rw-r--r--drivers/mfd/ezx-pcap.c3
-rw-r--r--drivers/mfd/htc-i2cpld.c5
-rw-r--r--drivers/mfd/intel_msic.c2
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c170
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h32
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c158
-rw-r--r--drivers/mfd/ipaq-micro.c8
-rw-r--r--drivers/mfd/kempld-core.c10
-rw-r--r--drivers/mfd/lp8788-irq.c2
-rw-r--r--drivers/mfd/max77686-irq.c319
-rw-r--r--drivers/mfd/max77686.c329
-rw-r--r--drivers/mfd/max8925-core.c2
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/mc13xxx-core.c310
-rw-r--r--drivers/mfd/mc13xxx.h11
-rw-r--r--drivers/mfd/mcp-core.c1
-rw-r--r--drivers/mfd/omap-usb-host.c2
-rw-r--r--drivers/mfd/pcf50633-core.c18
-rw-r--r--drivers/mfd/pm8921-core.c4
-rw-r--r--drivers/mfd/rtsx_pcr.c76
-rw-r--r--drivers/mfd/sec-core.c82
-rw-r--r--drivers/mfd/sec-irq.c110
-rw-r--r--drivers/mfd/si476x-cmd.c12
-rw-r--r--drivers/mfd/stmpe-i2c.c4
-rw-r--r--drivers/mfd/stmpe.c2
-rw-r--r--drivers/mfd/stmpe.h2
-rw-r--r--drivers/mfd/sun6i-prcm.c30
-rw-r--r--drivers/mfd/tc3589x.c2
-rw-r--r--drivers/mfd/tc6387xb.c7
-rw-r--r--drivers/mfd/tps6105x.c17
-rw-r--r--drivers/mfd/tps65910.c10
-rw-r--r--drivers/mfd/tps65912-spi.c3
-rw-r--r--drivers/mfd/twl4030-irq.c6
-rw-r--r--drivers/mfd/twl6030-irq.c4
-rw-r--r--drivers/mfd/twl6040.c2
-rw-r--r--drivers/mfd/wm5102-tables.c8
-rw-r--r--drivers/mfd/wm5110-tables.c245
-rw-r--r--drivers/mfd/wm8350-i2c.c8
-rw-r--r--drivers/mfd/wm8350-irq.c3
-rw-r--r--drivers/mfd/wm8994-regmap.c64
-rw-r--r--drivers/mfd/wm8997-tables.c16
-rw-r--r--drivers/misc/Kconfig10
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/atmel-ssc.c13
-rw-r--r--drivers/misc/atmel_pwm.c402
-rw-r--r--drivers/misc/fuse/Makefile1
-rw-r--r--drivers/mmc/host/au1xmmc.c198
-rw-r--r--drivers/mmc/host/rtsx_pci_sdmmc.c133
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c375
-rw-r--r--drivers/mtd/cmdlinepart.c2
-rw-r--r--drivers/mtd/devices/phram.c7
-rw-r--r--drivers/mtd/ftl.c4
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c2
-rw-r--r--drivers/mtd/mtdcore.c59
-rw-r--r--drivers/mtd/mtdpart.c13
-rw-r--r--drivers/mtd/nand/Makefile2
-rw-r--r--drivers/mtd/nand/atmel_nand.c142
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h4
-rw-r--r--drivers/mtd/nand/au1550nd.c29
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c24
-rw-r--r--drivers/mtd/nand/denali.c6
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c71
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c6
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c6
-rw-r--r--drivers/mtd/nand/nand_base.c18
-rw-r--r--drivers/mtd/nand/nand_bbt.c14
-rw-r--r--drivers/mtd/nand/nand_timings.c253
-rw-r--r--drivers/mtd/nand/s3c2410.c4
-rw-r--r--drivers/mtd/onenand/Kconfig4
-rw-r--r--drivers/mtd/onenand/samsung.c25
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c53
-rw-r--r--drivers/mtd/ubi/block.c18
-rw-r--r--drivers/mtd/ubi/vtbl.c2
-rw-r--r--drivers/mtd/ubi/wl.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c149
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c45
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c8
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-scc.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c5
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c7
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c11
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/irda/au1k_ir.c48
-rw-r--r--drivers/net/irda/vlsi_ir.c4
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c16
-rw-r--r--drivers/net/wireless/mwl8k.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c11
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c17
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/pci/host/Kconfig8
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-tegra.c227
-rw-r--r--drivers/pci/host/pcie-spear13xx.c393
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c33
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c4
-rw-r--r--drivers/pci/pci-acpi.c76
-rw-r--r--drivers/pcmcia/Kconfig10
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c1168
-rw-r--r--drivers/phy/Kconfig36
-rw-r--r--drivers/phy/Makefile5
-rw-r--r--drivers/phy/phy-s5pv210-usb2.c187
-rw-r--r--drivers/phy/phy-samsung-usb2.c6
-rw-r--r--drivers/phy/phy-samsung-usb2.h1
-rw-r--r--drivers/phy/phy-spear1310-miphy.c274
-rw-r--r--drivers/phy/phy-spear1340-miphy.c307
-rw-r--r--drivers/pinctrl/Kconfig123
-rw-r--r--drivers/pinctrl/Makefile25
-rw-r--r--drivers/pinctrl/core.c24
-rw-r--r--drivers/pinctrl/nomadik/Kconfig51
-rw-r--r--drivers/pinctrl/nomadik/Makefile10
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-ab8500.c (renamed from drivers/pinctrl/pinctrl-ab8500.c)0
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-ab8505.c (renamed from drivers/pinctrl/pinctrl-ab8505.c)0
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-ab8540.c (renamed from drivers/pinctrl/pinctrl-ab8540.c)0
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-ab9540.c (renamed from drivers/pinctrl/pinctrl-ab9540.c)0
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c (renamed from drivers/pinctrl/pinctrl-abx500.c)19
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.h (renamed from drivers/pinctrl/pinctrl-abx500.h)0
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c (renamed from drivers/pinctrl/pinctrl-nomadik-db8500.c)0
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c (renamed from drivers/pinctrl/pinctrl-nomadik-db8540.c)0
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c (renamed from drivers/pinctrl/pinctrl-nomadik-stn8815.c)0
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c (renamed from drivers/pinctrl/pinctrl-nomadik.c)18
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.h (renamed from drivers/pinctrl/pinctrl-nomadik.h)0
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c41
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c11
-rw-r--r--drivers/pinctrl/pinctrl-at91.c27
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c103
-rw-r--r--drivers/pinctrl/pinctrl-bcm281xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c11
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c10
-rw-r--r--drivers/pinctrl/pinctrl-imx.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx1-core.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx1.c279
-rw-r--r--drivers/pinctrl/pinctrl-imx27.c52
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c401
-rw-r--r--drivers/pinctrl/pinctrl-single.c61
-rw-r--r--drivers/pinctrl/pinctrl-st.c15
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c17
-rw-r--r--drivers/pinctrl/pinctrl-tegra-xusb.c973
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c13
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c28
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c58
-rw-r--r--drivers/pinctrl/pinctrl-u300.c14
-rw-r--r--drivers/pinctrl/pinmux.c4
-rw-r--r--drivers/pinctrl/qcom/Kconfig42
-rw-r--r--drivers/pinctrl/qcom/Makefile6
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c (renamed from drivers/pinctrl/pinctrl-apq8064.c)19
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c (renamed from drivers/pinctrl/pinctrl-ipq8064.c)17
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c (renamed from drivers/pinctrl/pinctrl-msm.c)42
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h (renamed from drivers/pinctrl/pinctrl-msm.h)0
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c1282
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c (renamed from drivers/pinctrl/pinctrl-msm8x74.c)29
-rw-r--r--drivers/pinctrl/samsung/Kconfig28
-rw-r--r--drivers/pinctrl/samsung/Makefile7
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c (renamed from drivers/pinctrl/pinctrl-exynos.c)307
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h (renamed from drivers/pinctrl/pinctrl-exynos.h)0
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos5440.c (renamed from drivers/pinctrl/pinctrl-exynos5440.c)10
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c (renamed from drivers/pinctrl/pinctrl-s3c24xx.c)0
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c (renamed from drivers/pinctrl/pinctrl-s3c64xx.c)0
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c (renamed from drivers/pinctrl/pinctrl-samsung.c)728
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h (renamed from drivers/pinctrl/pinctrl-samsung.h)18
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c9
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c417
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c11
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c22
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c10
-rw-r--r--drivers/pinctrl/spear/Kconfig1
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c84
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c7
-rw-r--r--drivers/pinctrl/sunxi/Kconfig24
-rw-r--r--drivers/pinctrl/sunxi/Makefile2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c217
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c142
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c593
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c187
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.h44
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c21
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c144
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c14
-rw-r--r--drivers/platform/x86/samsung-q10.c6
-rw-r--r--drivers/pnp/pnpacpi/core.c91
-rw-r--r--drivers/power/Kconfig7
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/bq2415x_charger.c8
-rw-r--r--drivers/power/bq27x00_battery.c28
-rw-r--r--drivers/power/ipaq_micro_battery.c290
-rw-r--r--drivers/power/power_supply_core.c3
-rw-r--r--drivers/power/power_supply_sysfs.c4
-rw-r--r--drivers/power/reset/Kconfig17
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/brcmstb-reboot.c120
-rw-r--r--drivers/power/reset/gpio-poweroff.c52
-rw-r--r--drivers/power/reset/hisi-reboot.c67
-rw-r--r--drivers/power/reset/restart-poweroff.c2
-rw-r--r--drivers/power/rx51_battery.c90
-rw-r--r--drivers/power/tps65090-charger.c76
-rw-r--r--drivers/power/twl4030_charger.c44
-rw-r--r--drivers/pwm/Kconfig19
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/pwm-imx.c1
-rw-r--r--drivers/pwm/pwm-lpss.c32
-rw-r--r--drivers/pwm/pwm-rockchip.c264
-rw-r--r--drivers/pwm/pwm-sti.c418
-rw-r--r--drivers/pwm/pwm-tipwmss.c4
-rw-r--r--drivers/rapidio/devices/tsi721.h12
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c718
-rw-r--r--drivers/rapidio/rio.c66
-rw-r--r--drivers/regulator/s2mps11.c321
-rw-r--r--drivers/rtc/Kconfig29
-rw-r--r--drivers/rtc/Makefile5
-rw-r--r--drivers/rtc/class.c16
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-au1xxx.c18
-rw-r--r--drivers/rtc/rtc-da9063.c54
-rw-r--r--drivers/rtc/rtc-ds1343.c75
-rw-r--r--drivers/rtc/rtc-ds1742.c2
-rw-r--r--drivers/rtc/rtc-efi-platform.c31
-rw-r--r--drivers/rtc/rtc-efi.c32
-rw-r--r--drivers/rtc/rtc-isl12022.c12
-rw-r--r--drivers/rtc/rtc-max77686.c27
-rw-r--r--drivers/rtc/rtc-pcf85063.c204
-rw-r--r--drivers/rtc/rtc-pcf8563.c231
-rw-r--r--drivers/rtc/rtc-tps65910.c4
-rw-r--r--drivers/s390/block/dasd.c196
-rw-r--r--drivers/s390/block/dasd_eckd.c30
-rw-r--r--drivers/s390/block/dasd_int.h5
-rw-r--r--drivers/s390/block/dasd_ioctl.c33
-rw-r--r--drivers/s390/char/con3215.c32
-rw-r--r--drivers/s390/cio/qdio_setup.c53
-rw-r--r--drivers/s390/net/qeth_core.h8
-rw-r--r--drivers/s390/net/qeth_core_main.c161
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c3
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c49
-rw-r--r--drivers/s390/scsi/zfcp_unit.c4
-rw-r--r--drivers/scsi/3w-sas.c5
-rw-r--r--drivers/scsi/3w-xxxx.h4
-rw-r--r--drivers/scsi/53c700.c7
-rw-r--r--drivers/scsi/Kconfig61
-rw-r--r--drivers/scsi/Makefile5
-rw-r--r--drivers/scsi/NCR5380.c31
-rw-r--r--drivers/scsi/NCR53c406a.c4
-rw-r--r--drivers/scsi/a100u2w.c10
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/advansys.c6
-rw-r--r--drivers/scsi/aha152x.c6
-rw-r--r--drivers/scsi/aic7xxx/aic7770_osm.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm_pci.c3
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_proc.c2
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c11
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm_pci.c3
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_proc.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c8
-rw-r--r--drivers/scsi/arm/acornscsi.c14
-rw-r--r--drivers/scsi/arm/fas216.c11
-rw-r--r--drivers/scsi/arm/queue.c3
-rw-r--r--drivers/scsi/atari_NCR5380.c62
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c10
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c23
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c6
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h2
-rw-r--r--drivers/scsi/bfa/bfad.c62
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c16
-rw-r--r--drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h13
-rw-r--r--drivers/scsi/bnx2fc/Kconfig4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h7
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_constants.h13
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.c13
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h13
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c11
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c7
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c3
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h6
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h6
-rw-r--r--drivers/scsi/bnx2i/Kconfig4
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h6
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c6
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c14
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c15
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c6
-rw-r--r--drivers/scsi/ch.c30
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c24
-rw-r--r--drivers/scsi/csiostor/csio_wr.c8
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h2
-rw-r--r--drivers/scsi/dc395x.c51
-rw-r--r--drivers/scsi/dpt_i2o.c38
-rw-r--r--drivers/scsi/dpti.h6
-rw-r--r--drivers/scsi/eata.c9
-rw-r--r--drivers/scsi/fnic/fnic_isr.c4
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c20
-rw-r--r--drivers/scsi/g_NCR5380.c2
-rw-r--r--drivers/scsi/hosts.c50
-rw-r--r--drivers/scsi/hpsa.c45
-rw-r--r--drivers/scsi/hptiop.c2
-rw-r--r--drivers/scsi/ibmvscsi/Makefile1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c477
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h268
-rw-r--r--drivers/scsi/ibmvscsi/ibmvstgt.c1001
-rw-r--r--drivers/scsi/in2000.c6
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/libiscsi.c12
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c16
-rw-r--r--drivers/scsi/libsrp.c447
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c36
-rw-r--r--drivers/scsi/megaraid.c6
-rw-r--r--drivers/scsi/megaraid/mega_common.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c4
-rw-r--r--drivers/scsi/mesh.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c75
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c83
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c75
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c92
-rw-r--r--drivers/scsi/mvsas/mv_sas.c24
-rw-r--r--drivers/scsi/mvumi.c9
-rw-r--r--drivers/scsi/ncr53c8xx.c2
-rw-r--r--drivers/scsi/ncr53c8xx.h4
-rw-r--r--drivers/scsi/nsp32.c2
-rw-r--r--drivers/scsi/pas16.c2
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c11
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c65
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c40
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c43
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c48
-rw-r--r--drivers/scsi/pmcraid.c6
-rw-r--r--drivers/scsi/ps3rom.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h6
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h18
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c48
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c22
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c20
-rw-r--r--drivers/scsi/qlogicfas.c2
-rw-r--r--drivers/scsi/qlogicpti.c2
-rw-r--r--drivers/scsi/scsi.c132
-rw-r--r--drivers/scsi/scsi_debug.c1483
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_error.c186
-rw-r--r--drivers/scsi/scsi_ioctl.c17
-rw-r--r--drivers/scsi/scsi_lib.c935
-rw-r--r--drivers/scsi/scsi_priv.h5
-rw-r--r--drivers/scsi/scsi_proc.c2
-rw-r--r--drivers/scsi/scsi_scan.c187
-rw-r--r--drivers/scsi/scsi_sysfs.c49
-rw-r--r--drivers/scsi/scsi_tgt_if.c399
-rw-r--r--drivers/scsi/scsi_tgt_lib.c661
-rw-r--r--drivers/scsi/scsi_tgt_priv.h32
-rw-r--r--drivers/scsi/scsi_transport_fc.c20
-rw-r--r--drivers/scsi/scsi_transport_fc_internal.h26
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c8
-rw-r--r--drivers/scsi/scsi_transport_sas.c2
-rw-r--r--drivers/scsi/scsi_transport_srp.c18
-rw-r--r--drivers/scsi/scsi_transport_srp_internal.h25
-rw-r--r--drivers/scsi/sd.c220
-rw-r--r--drivers/scsi/sd.h3
-rw-r--r--drivers/scsi/sg.c657
-rw-r--r--drivers/scsi/sr.c70
-rw-r--r--drivers/scsi/sr.h5
-rw-r--r--drivers/scsi/sr_ioctl.c26
-rw-r--r--drivers/scsi/sr_vendor.c36
-rw-r--r--drivers/scsi/st.c605
-rw-r--r--drivers/scsi/storvsc_drv.c123
-rw-r--r--drivers/scsi/sun3_NCR5380.c57
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h2
-rw-r--r--drivers/scsi/tmscsim.c6
-rw-r--r--drivers/scsi/u14-34f.c10
-rw-r--r--drivers/scsi/ufs/ufs.h38
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c26
-rw-r--r--drivers/scsi/ufs/ufshcd.c427
-rw-r--r--drivers/scsi/ufs/ufshci.h5
-rw-r--r--drivers/scsi/virtio_scsi.c75
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/scsi/wd33c93.c33
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/tegra/Makefile4
-rw-r--r--drivers/soc/tegra/common.c30
-rw-r--r--drivers/soc/tegra/fuse/Makefile8
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c163
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra20.c215
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c224
-rw-r--r--drivers/soc/tegra/fuse/fuse.h71
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra114.c110
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra124.c168
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra20.c110
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra30.c288
-rw-r--r--drivers/soc/tegra/fuse/tegra-apbmisc.c115
-rw-r--r--drivers/soc/tegra/pmc.c957
-rw-r--r--drivers/spi/spi-au1550.c66
-rw-r--r--drivers/spi/spi-s3c64xx.c22
-rw-r--r--drivers/staging/android/binder.c4
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c6
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c15
-rw-r--r--drivers/staging/rtl8192ee/pci.c37
-rw-r--r--drivers/staging/rtl8821ae/pci.c36
-rw-r--r--drivers/staging/rts5208/rtsx.c4
-rw-r--r--drivers/staging/slicoss/slicoss.c9
-rw-r--r--drivers/staging/vt6655/device_main.c40
-rw-r--r--drivers/target/loopback/tcm_loop.c2
-rw-r--r--drivers/target/target_core_pscsi.c12
-rw-r--r--drivers/thermal/Kconfig7
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/cpu_cooling.c2
-rw-r--r--drivers/thermal/int3403_thermal.c67
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c11
-rw-r--r--drivers/thermal/samsung/exynos_tmu.h3
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.c89
-rw-r--r--drivers/thermal/samsung/exynos_tmu_data.h7
-rw-r--r--drivers/thermal/st/Kconfig12
-rw-r--r--drivers/thermal/st/Makefile3
-rw-r--r--drivers/thermal/st/st_thermal.c313
-rw-r--r--drivers/thermal/st/st_thermal.h104
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c209
-rw-r--r--drivers/thermal/st/st_thermal_syscfg.c179
-rw-r--r--drivers/tty/serial/samsung.c4
-rw-r--r--drivers/tty/synclink_gt.c5
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/usb/phy/phy.c2
-rw-r--r--drivers/usb/storage/Kconfig4
-rw-r--r--drivers/usb/storage/sddr09.c4
-rw-r--r--drivers/usb/storage/usb.c10
-rw-r--r--drivers/vfio/Makefile1
-rw-r--r--drivers/vfio/pci/vfio_pci.c18
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c17
-rw-r--r--drivers/vfio/vfio_spapr_eeh.c87
-rw-r--r--drivers/video/backlight/Kconfig20
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/aat2870_bl.c6
-rw-r--r--drivers/video/backlight/ams369fg06.c6
-rw-r--r--drivers/video/backlight/atmel-pwm-bl.c223
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/bd6107.c6
-rw-r--r--drivers/video/backlight/gpio_backlight.c6
-rw-r--r--drivers/video/backlight/ipaq_micro_bl.c83
-rw-r--r--drivers/video/backlight/jornada720_lcd.c37
-rw-r--r--drivers/video/backlight/ld9040.c6
-rw-r--r--drivers/video/backlight/lp855x_bl.c6
-rw-r--r--drivers/video/backlight/lp8788_bl.c6
-rw-r--r--drivers/video/backlight/lv5207lp.c6
-rw-r--r--drivers/video/backlight/pandora_bl.c6
-rw-r--r--drivers/video/backlight/pwm_bl.c13
-rw-r--r--drivers/video/backlight/s6e63m0.c6
-rw-r--r--drivers/video/backlight/tps65217_bl.c6
-rw-r--r--drivers/video/fbdev/68328fb.c8
-rw-r--r--drivers/video/fbdev/Kconfig33
-rw-r--r--drivers/video/fbdev/Makefile4
-rw-r--r--drivers/video/fbdev/amba-clcd-versatile.c182
-rw-r--r--drivers/video/fbdev/amba-clcd.c263
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c3
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c6
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c6
-rw-r--r--drivers/video/fbdev/aty/radeon_backlight.c6
-rw-r--r--drivers/video/fbdev/au1100fb.c39
-rw-r--r--drivers/video/fbdev/au1100fb.h1
-rw-r--r--drivers/video/fbdev/au1200fb.c81
-rw-r--r--drivers/video/fbdev/clps711x-fb.c397
-rw-r--r--drivers/video/fbdev/da8xx-fb.c9
-rw-r--r--drivers/video/fbdev/exynos/s6e8ax0.c6
-rw-r--r--drivers/video/fbdev/hyperv_fb.c62
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c4
-rw-r--r--drivers/video/fbdev/msm/mddi_client_dummy.c19
-rw-r--r--drivers/video/fbdev/nvidia/nv_backlight.c6
-rw-r--r--drivers/video/fbdev/omap2/displays-new/connector-hdmi.c19
-rw-r--r--drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c20
-rw-r--r--drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c5
-rw-r--r--drivers/video/fbdev/omap2/dss/Kconfig1
-rw-r--r--drivers/video/fbdev/omap2/dss/dispc.c22
-rw-r--r--drivers/video/fbdev/omap2/dss/dsi.c9
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi.h107
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4.c53
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4_core.c164
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi4_core.h1
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5.c53
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi5_core.c124
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_common.c316
-rw-r--r--drivers/video/fbdev/riva/fbdev.c6
-rw-r--r--drivers/video/fbdev/s3c-fb.c65
-rw-r--r--drivers/video/fbdev/s3c2410fb.c12
-rw-r--r--drivers/video/fbdev/sis/init.c2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c4
-rw-r--r--drivers/virtio/virtio_pci.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c6
-rw-r--r--drivers/vme/bridges/vme_tsi148.c6
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/dw_wdt.c3
-rw-r--r--drivers/watchdog/imx2_wdt.c7
-rw-r--r--drivers/watchdog/lantiq_wdt.c5
-rw-r--r--drivers/watchdog/octeon-wdt-main.c62
-rw-r--r--drivers/watchdog/shwdt.c5
-rw-r--r--drivers/watchdog/sunxi_wdt.c29
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/events/events_fifo.c55
-rw-r--r--drivers/xen/grant-table.c309
-rw-r--r--drivers/xen/xen-pciback/xenbus.c1
1499 files changed, 80787 insertions, 37730 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 206942b8d105..d0f3265fb85d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -42,6 +42,12 @@ menuconfig ACPI
if ACPI
+config ACPI_LEGACY_TABLES_LOOKUP
+ bool
+
+config ARCH_MIGHT_HAVE_ACPI_PDC
+ bool
+
config ACPI_SLEEP
bool
depends on SUSPEND || HIBERNATION
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ea55e0179f81..505d4d79fe3e 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -36,6 +36,7 @@ acpi-y += scan.o
acpi-y += resource.o
acpi-y += acpi_processor.o
acpi-y += processor_core.o
+acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
acpi-y += pci_root.o pci_link.o pci_irq.o
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index 0ad6f389d922..b3842ffc19ba 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -71,11 +71,11 @@ static u32 l1_percpu_entry;
#define ELOG_ENTRY_ADDR(phyaddr) \
(phyaddr - elog_base + (u8 *)elog_addr)
-static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank)
+static struct acpi_hest_generic_status *extlog_elog_entry_check(int cpu, int bank)
{
int idx;
u64 data;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
WARN_ON(cpu < 0);
idx = ELOG_IDX(cpu, bank);
@@ -84,7 +84,7 @@ static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank)
return NULL;
data &= EXT_ELOG_ENTRY_MASK;
- estatus = (struct acpi_generic_status *)ELOG_ENTRY_ADDR(data);
+ estatus = (struct acpi_hest_generic_status *)ELOG_ENTRY_ADDR(data);
/* if no valid data in elog entry, just return */
if (estatus->block_status == 0)
@@ -94,7 +94,7 @@ static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank)
}
static void __print_extlog_rcd(const char *pfx,
- struct acpi_generic_status *estatus, int cpu)
+ struct acpi_hest_generic_status *estatus, int cpu)
{
static atomic_t seqno;
unsigned int curr_seqno;
@@ -113,7 +113,7 @@ static void __print_extlog_rcd(const char *pfx,
}
static int print_extlog_rcd(const char *pfx,
- struct acpi_generic_status *estatus, int cpu)
+ struct acpi_hest_generic_status *estatus, int cpu)
{
/* Not more than 2 messages every 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
@@ -139,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
struct mce *mce = (struct mce *)data;
int bank = mce->bank;
int cpu = mce->extcpu;
- struct acpi_generic_status *estatus, *tmp;
- struct acpi_generic_data *gdata;
+ struct acpi_hest_generic_status *estatus, *tmp;
+ struct acpi_hest_generic_data *gdata;
const uuid_le *fru_id = &NULL_UUID_LE;
char *fru_text = "";
uuid_le *sec_type;
@@ -154,7 +154,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
/* clear record status to enable BIOS to update it again */
estatus->block_status = 0;
- tmp = (struct acpi_generic_status *)elog_buf;
+ tmp = (struct acpi_hest_generic_status *)elog_buf;
if (!ras_userspace_consumers()) {
print_extlog_rcd(NULL, tmp, cpu);
@@ -163,7 +163,7 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
/* log event via trace */
err_seq++;
- gdata = (struct acpi_generic_data *)(tmp + 1);
+ gdata = (struct acpi_hest_generic_data *)(tmp + 1);
if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
fru_id = (uuid_le *)gdata->fru_id;
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 9cb65b0e7597..ce06149088c5 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -113,6 +113,14 @@ static void lpss_i2c_setup(struct lpss_private_data *pdata)
writel(val, pdata->mmio_base + offset);
}
+static struct lpss_device_desc wpt_dev_desc = {
+ .clk_required = true,
+ .prv_offset = 0x800,
+ .ltr_required = true,
+ .clk_divider = true,
+ .clk_gate = true,
+};
+
static struct lpss_device_desc lpt_dev_desc = {
.clk_required = true,
.prv_offset = 0x800,
@@ -226,6 +234,8 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
{ "INT3437", },
+ { "INT3438", LPSS_ADDR(wpt_dev_desc) },
+
{ }
};
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 4ddb0dca56f6..996fa1959eea 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -12,6 +12,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/ctype.h>
static const struct acpi_device_id acpi_pnp_device_ids[] = {
/* soc_button_array */
@@ -320,11 +321,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{""},
};
-static bool is_hex_digit(char c)
-{
- return (c >= 0 && c <= '9') || (c >= 'A' && c <= 'F');
-}
-
static bool matching_id(char *idstr, char *list_id)
{
int i;
@@ -335,7 +331,7 @@ static bool matching_id(char *idstr, char *list_id)
for (i = 3; i < 7; i++) {
char c = toupper(idstr[i]);
- if (!is_hex_digit(c)
+ if (!isxdigit(c)
|| (list_id[i] != 'X' && c != toupper(list_id[i])))
return false;
}
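For illustration only, a minimal user-space sketch of the check this hunk switches to: isxdigit() from ctype accepts 0-9, a-f and A-F, whereas the removed open-coded helper compared against the integer 0 rather than the character '0'. The name check_hex_part() is invented, and only the four hex characters of the ID are validated here.

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

/* Check only the 4-character hex portion of a PNP/ACPI ID against a
 * list entry, where 'X' in the list entry acts as a wildcard. */
static bool check_hex_part(const char *idstr, const char *list_id)
{
	int i;

	for (i = 3; i < 7; i++) {
		char c = toupper((unsigned char)idstr[i]);

		if (!isxdigit((unsigned char)c) ||
		    (list_id[i] != 'X' && c != toupper((unsigned char)list_id[i])))
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", check_hex_part("PNP0c0d", "PNP0XXX"));	/* prints 1 */
	printf("%d\n", check_hex_part("PNPzzzz", "PNPXXXX"));	/* prints 0 */
	return 0;
}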
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1c085742644f..1fdf5e07a1c7 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -268,7 +268,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->apic_id = apic_id;
cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
- if (!cpu0_initialized && !acpi_lapic) {
+ if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
cpu0_initialized = 1;
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
if ((cpu_index == -1) && (num_online_cpus() == 1))
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 8bb43f06e11f..c1a963581dc0 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -2,7 +2,7 @@
# Makefile for ACPICA Core interpreter
#
-ccflags-y := -Os
+ccflags-y := -Os -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
@@ -157,6 +157,7 @@ acpi-y += \
uterror.o \
uteval.o \
utglobal.o \
+ uthex.o \
utids.o \
utinit.o \
utlock.o \
@@ -175,5 +176,10 @@ acpi-y += \
utxferror.o \
utxfmutex.o
-acpi-$(ACPI_FUTURE_USAGE) += uttrack.o utcache.o
+acpi-$(ACPI_FUTURE_USAGE) += \
+ utcache.o \
+ utfileio.o \
+ utprint.o \
+ uttrack.o \
+ utuuid.o
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 8698ffba6f39..3d2c88289da9 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -79,10 +79,13 @@
/* Macros for usage messages */
#define ACPI_USAGE_HEADER(usage) \
- printf ("Usage: %s\nOptions:\n", usage);
+ acpi_os_printf ("Usage: %s\nOptions:\n", usage);
+
+#define ACPI_USAGE_TEXT(description) \
+ acpi_os_printf (description);
#define ACPI_OPTION(name, description) \
- printf (" %-18s%s\n", name, description);
+ acpi_os_printf (" %-18s%s\n", name, description);
#define FILE_SUFFIX_DISASSEMBLY "dsl"
#define ACPI_TABLE_FILE_SUFFIX ".dat"
@@ -102,7 +105,7 @@ extern char *acpi_gbl_optarg;
/*
* cmfsize - Common get file size function
*/
-u32 cm_get_file_size(FILE * file);
+u32 cm_get_file_size(ACPI_FILE file);
#ifndef ACPI_DUMP_APP
/*
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 68a91eb0fa48..1d026ff1683f 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -233,9 +233,6 @@ acpi_status acpi_db_load_acpi_table(char *filename);
acpi_status
acpi_db_get_table_from_file(char *filename, struct acpi_table_header **table);
-acpi_status
-acpi_db_read_table_from_file(char *filename, struct acpi_table_header **table);
-
/*
* dbhistry - debugger HISTORY command
*/
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 115eedcade1e..ebf02cc10a43 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -297,7 +297,7 @@ ACPI_GLOBAL(u32, acpi_gbl_trace_dbg_layer);
*
****************************************************************************/
-ACPI_GLOBAL(u8, acpi_gbl_db_output_flags);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_db_output_flags, ACPI_DB_CONSOLE_OUTPUT);
#ifdef ACPI_DISASSEMBLER
@@ -362,6 +362,12 @@ ACPI_GLOBAL(u32, acpi_gbl_num_objects);
#ifdef ACPI_APPLICATION
ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_debug_file, NULL);
+ACPI_INIT_GLOBAL(ACPI_FILE, acpi_gbl_output_file, NULL);
+
+/* Print buffer */
+
+ACPI_GLOBAL(acpi_spinlock, acpi_gbl_print_lock); /* For print buffer */
+ACPI_GLOBAL(char, acpi_gbl_print_buffer[1024]);
#endif /* ACPI_APPLICATION */
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 91f801a2e689..1f9aba5fb81f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -730,12 +730,13 @@ union acpi_parse_value {
#define ACPI_DASM_STRING 0x02 /* Buffer is a ASCII string */
#define ACPI_DASM_UNICODE 0x03 /* Buffer is a Unicode string */
#define ACPI_DASM_PLD_METHOD 0x04 /* Buffer is a _PLD method bit-packed buffer */
-#define ACPI_DASM_EISAID 0x05 /* Integer is an EISAID */
-#define ACPI_DASM_MATCHOP 0x06 /* Parent opcode is a Match() operator */
-#define ACPI_DASM_LNOT_PREFIX 0x07 /* Start of a Lnot_equal (etc.) pair of opcodes */
-#define ACPI_DASM_LNOT_SUFFIX 0x08 /* End of a Lnot_equal (etc.) pair of opcodes */
-#define ACPI_DASM_HID_STRING 0x09 /* String is a _HID or _CID */
-#define ACPI_DASM_IGNORE 0x0A /* Not used at this time */
+#define ACPI_DASM_UUID 0x05 /* Buffer is a UUID/GUID */
+#define ACPI_DASM_EISAID 0x06 /* Integer is an EISAID */
+#define ACPI_DASM_MATCHOP 0x07 /* Parent opcode is a Match() operator */
+#define ACPI_DASM_LNOT_PREFIX 0x08 /* Start of a Lnot_equal (etc.) pair of opcodes */
+#define ACPI_DASM_LNOT_SUFFIX 0x09 /* End of a Lnot_equal (etc.) pair of opcodes */
+#define ACPI_DASM_HID_STRING 0x0A /* String is a _HID or _CID */
+#define ACPI_DASM_IGNORE 0x0B /* Not used at this time */
/*
* Generic operation (for example: If, While, Store)
@@ -1154,4 +1155,9 @@ struct ah_device_id {
char *description;
};
+struct ah_uuid {
+ char *description;
+ char *string;
+};
+
#endif /* __ACLOCAL_H__ */
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index bd08817cafd8..bd3908d26c4f 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -105,6 +105,11 @@
* count = 0 (optional)
* (Used for _DLM)
*
+ * ACPI_PTYPE2_UUID_PAIR: Each subpackage is preceded by a UUID Buffer. The UUID
+ * defines the format of the package. Zero-length parent package is
+ * allowed.
+ * (Used for _DSD)
+ *
*****************************************************************************/
enum acpi_return_package_types {
@@ -117,7 +122,8 @@ enum acpi_return_package_types {
ACPI_PTYPE2_FIXED = 7,
ACPI_PTYPE2_MIN = 8,
ACPI_PTYPE2_REV_FIXED = 9,
- ACPI_PTYPE2_FIX_VAR = 10
+ ACPI_PTYPE2_FIX_VAR = 10,
+ ACPI_PTYPE2_UUID_PAIR = 11
};
/* Support macros for users of the predefined info table */
@@ -364,6 +370,9 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_CBA", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See PCI firmware spec 3.0 */
+ {{"_CCA", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* ACPI 5.1 */
+
{{"_CDM", METHOD_0ARGS,
METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
@@ -436,6 +445,11 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DOS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
METHOD_NO_RETURN_VALUE}},
+ {{"_DSD", METHOD_0ARGS,
+ METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Variable-length (Pkgs) each: 1 Buf, 1 Pkg */
+ PACKAGE_INFO(ACPI_PTYPE2_UUID_PAIR, ACPI_RTYPE_BUFFER, 1,
+ ACPI_RTYPE_PACKAGE, 1, 0),
+
{{"_DSM",
METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
ACPI_TYPE_PACKAGE),
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 1e256c5bda20..486d342e74b6 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -95,7 +95,6 @@ extern const char *acpi_gbl_pt_decode[];
#ifdef ACPI_ASL_COMPILER
#include <stdio.h>
-extern FILE *acpi_gbl_output_file;
#define ACPI_MSG_REDIRECT_BEGIN \
FILE *output_file = acpi_gbl_output_file; \
@@ -195,6 +194,8 @@ char *acpi_ut_get_event_name(u32 event_id);
char acpi_ut_hex_to_ascii_char(u64 integer, u32 position);
+u8 acpi_ut_ascii_char_to_hex(int hex_char);
+
u8 acpi_ut_valid_object_type(acpi_object_type type);
/*
@@ -211,6 +212,8 @@ void acpi_ut_subsystem_shutdown(void);
acpi_size acpi_ut_strlen(const char *string);
+char *acpi_ut_strchr(const char *string, int ch);
+
char *acpi_ut_strcpy(char *dst_string, const char *src_string);
char *acpi_ut_strncpy(char *dst_string,
@@ -257,7 +260,7 @@ extern const u8 _acpi_ctype[];
#define ACPI_IS_XDIGIT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_XD))
#define ACPI_IS_UPPER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_UP))
#define ACPI_IS_LOWER(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO))
-#define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_SP | _ACPI_PU))
+#define ACPI_IS_PRINT(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP | _ACPI_DI | _ACPI_XS | _ACPI_PU))
#define ACPI_IS_ALPHA(c) (_acpi_ctype[(unsigned char)(c)] & (_ACPI_LO | _ACPI_UP))
#endif /* !ACPI_USE_SYSTEM_CLIBRARY */
@@ -352,6 +355,13 @@ acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id);
void acpi_ut_dump_buffer(u8 *buffer, u32 count, u32 display, u32 offset);
+#ifdef ACPI_APPLICATION
+void
+acpi_ut_dump_buffer_to_file(ACPI_FILE file,
+ u8 *buffer,
+ u32 count, u32 display, u32 base_offset);
+#endif
+
void acpi_ut_report_error(char *module_name, u32 line_number);
void acpi_ut_report_info(char *module_name, u32 line_number);
@@ -394,6 +404,14 @@ acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node,
u8 method_count, u8 *out_values);
/*
+ * utfileio - file operations
+ */
+#ifdef ACPI_APPLICATION
+acpi_status
+acpi_ut_read_table_from_file(char *filename, struct acpi_table_header **table);
+#endif
+
+/*
* utids - device ID support
*/
acpi_status
@@ -743,4 +761,30 @@ const struct ah_predefined_name *acpi_ah_match_predefined_name(char *nameseg);
const struct ah_device_id *acpi_ah_match_hardware_id(char *hid);
+const char *acpi_ah_match_uuid(u8 *data);
+
+/*
+ * utprint - printf/vprintf output functions
+ */
+const char *acpi_ut_scan_number(const char *string, u64 *number_ptr);
+
+const char *acpi_ut_print_number(char *string, u64 number);
+
+int
+acpi_ut_vsnprintf(char *string,
+ acpi_size size, const char *format, va_list args);
+
+int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...);
+
+#ifdef ACPI_APPLICATION
+int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args);
+
+int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...);
+#endif
+
+/*
+ * utuuid -- UUID support functions
+ */
+void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer);
+
#endif /* _ACUTILS_H */
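The new acpi_ut_snprintf()/acpi_ut_vsnprintf() prototypes follow the usual varargs forwarding pattern; a rough standalone sketch of that pattern, using the C library's vsnprintf rather than ACPICA's internal formatter and an invented name demo_snprintf():

#include <stdarg.h>
#include <stdio.h>

static int demo_snprintf(char *string, size_t size, const char *format, ...)
{
	va_list args;
	int length;

	va_start(args, format);
	length = vsnprintf(string, size, format, args);	/* formatter does the work */
	va_end(args);

	return length;
}

int main(void)
{
	char buf[32];

	demo_snprintf(buf, sizeof(buf), "GPE %02X", 0x1A);
	puts(buf);	/* prints "GPE 1A" */
	return 0;
}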
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 48f70013b488..e4ba4dec86af 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -698,21 +698,6 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
}
/*
- * If edge-triggered, clear the GPE status bit now. Note that
- * level-triggered events are cleared after the GPE is serviced.
- */
- if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
- ACPI_GPE_EDGE_TRIGGERED) {
- status = acpi_hw_clear_gpe(gpe_event_info);
- if (ACPI_FAILURE(status)) {
- ACPI_EXCEPTION((AE_INFO, status,
- "Unable to clear GPE %02X",
- gpe_number));
- return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
- }
- }
-
- /*
* Always disable the GPE so that it does not keep firing before
* any asynchronous activity completes (either from the execution
* of a GPE method or an asynchronous GPE handler.)
@@ -729,6 +714,23 @@ acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
}
/*
+ * If edge-triggered, clear the GPE status bit now. Note that
+ * level-triggered events are cleared after the GPE is serviced.
+ */
+ if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
+ ACPI_GPE_EDGE_TRIGGERED) {
+ status = acpi_hw_clear_gpe(gpe_event_info);
+ if (ACPI_FAILURE(status)) {
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Unable to clear GPE %02X",
+ gpe_number));
+ (void)acpi_hw_low_set_gpe(gpe_event_info,
+ ACPI_GPE_CONDITIONAL_ENABLE);
+ return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
+ }
+ }
+
+ /*
* Dispatch the GPE to either an installed handler or the control
* method associated with this GPE (_Lxx or _Exx). If a handler
* exists, we invoke it and do not attempt to run the method.
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index cb534faf5369..0cf159cc6e6d 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -126,11 +126,19 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
- /* Ensure that we have a valid GPE number */
-
+ /*
+ * Ensure that we have a valid GPE number and that there is some way
+ * of handling the GPE (handler or a GPE method). In other words, we
+ * won't allow a valid GPE to be enabled if there is no way to handle it.
+ */
gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
if (gpe_event_info) {
- status = acpi_ev_add_gpe_reference(gpe_event_info);
+ if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
+ ACPI_GPE_DISPATCH_NONE) {
+ status = acpi_ev_add_gpe_reference(gpe_event_info);
+ } else {
+ status = AE_NO_HANDLER;
+ }
}
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
@@ -179,6 +187,53 @@ ACPI_EXPORT_SYMBOL(acpi_disable_gpe)
/*******************************************************************************
*
+ * FUNCTION: acpi_mark_gpe_for_wake
+ *
+ * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
+ * gpe_number - GPE level within the GPE block
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Mark a GPE as having the ability to wake the system. Simply
+ * sets the ACPI_GPE_CAN_WAKE flag.
+ *
+ * Some potential callers of acpi_setup_gpe_for_wake may know in advance that
+ * there won't be any notify handlers installed for device wake notifications
+ * from the given GPE (one example is a button GPE in Linux). For these cases,
+ * acpi_mark_gpe_for_wake should be used instead of acpi_setup_gpe_for_wake.
+ * This will set the ACPI_GPE_CAN_WAKE flag for the GPE without trying to
+ * setup implicit wake notification for it (since there's no handler method).
+ *
+ ******************************************************************************/
+acpi_status acpi_mark_gpe_for_wake(acpi_handle gpe_device, u32 gpe_number)
+{
+ struct acpi_gpe_event_info *gpe_event_info;
+ acpi_status status = AE_BAD_PARAMETER;
+ acpi_cpu_flags flags;
+
+ ACPI_FUNCTION_TRACE(acpi_mark_gpe_for_wake);
+
+ flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+ /* Ensure that we have a valid GPE number */
+
+ gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
+ if (gpe_event_info) {
+
+ /* Mark the GPE as a possible wake event */
+
+ gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+ status = AE_OK;
+ }
+
+ acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_mark_gpe_for_wake)
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_setup_gpe_for_wake
*
* PARAMETERS: wake_device - Device associated with the GPE (via _PRW)
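A hedged usage sketch of the new interface: a driver that handles its own wake events (a button-like device, for example) could mark the GPE wake-capable and then enable it. The helper name and calling context are invented; acpi_mark_gpe_for_wake() and acpi_enable_gpe() are the only real API calls used.

#include <linux/acpi.h>

/*
 * Hypothetical helper: the GPE device handle and number would come from
 * the driver's probe path.
 */
static acpi_status demo_arm_wake_gpe(acpi_handle gpe_device, u32 gpe_number)
{
	acpi_status status;

	/* Flag wake capability only; no implicit notify handler is set up */
	status = acpi_mark_gpe_for_wake(gpe_device, gpe_number);
	if (ACPI_FAILURE(status))
		return status;

	/* Then enable the GPE for runtime use as before */
	return acpi_enable_gpe(gpe_device, gpe_number);
}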
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index 4cfc3d3b5c97..6fbfad47518c 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -75,6 +75,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
u32 level, u32 index)
{
u32 i;
+ u32 timer;
ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
@@ -86,11 +87,19 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
}
/*
+ * We will emit the current timer value (in microseconds) with each
+ * debug output. Only need the lower 26 bits. This allows for 67
+ * million microseconds or 67 seconds before rollover.
+ */
+ timer = ((u32)acpi_os_get_timer() / 10); /* (100 nanoseconds to microseconds) */
+ timer &= 0x03FFFFFF;
+
+ /*
* Print line header as long as we are not in the middle of an
* object display
*/
if (!((level > 0) && index == 0)) {
- acpi_os_printf("[ACPI Debug] %*s", level, " ");
+ acpi_os_printf("[ACPI Debug %.8u] %*s", timer, level, " ");
}
/* Display the index for package output only */
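A quick standalone check of the arithmetic in the new comment (example value invented): acpi_os_get_timer() counts 100 ns units, dividing by 10 yields microseconds, and keeping 26 bits gives a 2^26 us, roughly 67 second, window before the printed timestamp wraps.

#include <stdio.h>

int main(void)
{
	unsigned long long hundred_ns_ticks = 987654321ULL;	/* made-up raw timer value */
	unsigned int timer = (unsigned int)(hundred_ns_ticks / 10);	/* 100 ns -> us */

	timer &= 0x03FFFFFF;	/* keep the low 26 bits, as in the patch */

	printf("[ACPI Debug %.8u] ...\n", timer);
	printf("rollover every %u us (about %u s)\n",
	       0x03FFFFFFu + 1u, (0x03FFFFFFu + 1u) / 1000000u);
	return 0;
}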
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 925202acc3e4..0f23c3f2678e 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -494,7 +494,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
}
}
- acpi_os_printf("\n", next);
+ acpi_os_printf("\n");
break;
case ACPI_EXD_HDLR_LIST:
@@ -528,7 +528,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
}
}
- acpi_os_printf("\n", next);
+ acpi_os_printf("\n");
break;
case ACPI_EXD_RGN_LIST:
@@ -562,7 +562,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
}
}
- acpi_os_printf("\n", next);
+ acpi_os_printf("\n");
break;
case ACPI_EXD_NODE:
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 12878e1982f7..6907ce0c704c 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -56,7 +56,7 @@ acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length);
/*******************************************************************************
*
- * FUNCTION: acpi_get_serial_access_bytes
+ * FUNCTION: acpi_ex_get_serial_access_length
*
* PARAMETERS: accessor_type - The type of the protocol indicated by region
* field access attributes
@@ -103,7 +103,7 @@ acpi_ex_get_serial_access_length(u32 accessor_type, u32 access_length)
case AML_FIELD_ATTRIB_BLOCK_CALL:
default:
- length = ACPI_GSBUS_BUFFER_SIZE;
+ length = ACPI_GSBUS_BUFFER_SIZE - 2;
break;
}
@@ -186,12 +186,11 @@ acpi_ex_read_data_from_field(struct acpi_walk_state * walk_state,
access_length);
/*
- * Add additional 2 bytes for modeled generic_serial_bus data buffer:
- * typedef struct {
- * BYTEStatus; // Byte 0 of the data buffer
- * BYTELength; // Byte 1 of the data buffer
- * BYTE[x-1]Data; // Bytes 2-x of the arbitrary length data buffer,
- * }
+ * Add additional 2 bytes for the generic_serial_bus data buffer:
+ *
+ * Status; (Byte 0 of the data buffer)
+ * Length; (Byte 1 of the data buffer)
+ * Data[x-1]; (Bytes 2-x of the arbitrary length data buffer)
*/
length += 2;
function = ACPI_READ | (accessor_type << 16);
@@ -368,12 +367,11 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
access_length);
/*
- * Add additional 2 bytes for modeled generic_serial_bus data buffer:
- * typedef struct {
- * BYTEStatus; // Byte 0 of the data buffer
- * BYTELength; // Byte 1 of the data buffer
- * BYTE[x-1]Data; // Bytes 2-x of the arbitrary length data buffer,
- * }
+ * Add additional 2 bytes for the generic_serial_bus data buffer:
+ *
+ * Status; (Byte 0 of the data buffer)
+ * Length; (Byte 1 of the data buffer)
+ * Data[x-1]; (Bytes 2-x of the arbitrary length data buffer)
*/
length += 2;
function = ACPI_WRITE | (accessor_type << 16);
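For clarity, an illustrative (non-kernel) rendering of the GenericSerialBus data buffer layout the rewritten comment describes; the two header bytes are why the code adds 2 to the transfer length.

#include <stdint.h>
#include <stdio.h>

struct gsb_buffer_layout {
	uint8_t status;		/* byte 0 of the data buffer */
	uint8_t length;		/* byte 1 of the data buffer */
	uint8_t data[];		/* bytes 2..x, arbitrary-length payload */
};

int main(void)
{
	/* The payload starts after a 2-byte header, hence "length += 2" above. */
	printf("header size = %zu bytes\n", sizeof(struct gsb_buffer_layout));
	return 0;
}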
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index e0fd9b4978cd..a4c34d2c556b 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -278,8 +278,9 @@ acpi_status acpi_hw_clear_acpi_status(void)
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
goto exit;
+ }
/* Clear the GPE Bits in all GPE registers in all GPE blocks */
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index fe54a8c73b8c..a42ee9d6970d 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -237,6 +237,16 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
(node->object->common.type != ACPI_TYPE_LOCAL_DATA)) {
node->object = node->object->common.next_object;
}
+
+ /*
+ * Detach the object from any data objects (which are still held by
+ * the namespace node)
+ */
+ if (obj_desc->common.next_object &&
+ ((obj_desc->common.next_object)->common.type ==
+ ACPI_TYPE_LOCAL_DATA)) {
+ obj_desc->common.next_object = NULL;
+ }
}
/* Reset the node type to untyped */
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index 3c1699740653..038ea887f562 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -199,3 +199,131 @@ acpi_ut_debug_dump_buffer(u8 *buffer, u32 count, u32 display, u32 component_id)
acpi_ut_dump_buffer(buffer, count, display, 0);
}
+
+#ifdef ACPI_APPLICATION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_dump_buffer_to_file
+ *
+ * PARAMETERS: file - File descriptor
+ * buffer - Buffer to dump
+ * count - Amount to dump, in bytes
+ * display - BYTE, WORD, DWORD, or QWORD display:
+ * DB_BYTE_DISPLAY
+ * DB_WORD_DISPLAY
+ * DB_DWORD_DISPLAY
+ * DB_QWORD_DISPLAY
+ * base_offset - Beginning buffer offset (display only)
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Generic dump buffer in both hex and ascii to a file.
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_dump_buffer_to_file(ACPI_FILE file,
+ u8 *buffer, u32 count, u32 display, u32 base_offset)
+{
+ u32 i = 0;
+ u32 j;
+ u32 temp32;
+ u8 buf_char;
+
+ if (!buffer) {
+ acpi_ut_file_printf(file,
+ "Null Buffer Pointer in DumpBuffer!\n");
+ return;
+ }
+
+ if ((count < 4) || (count & 0x01)) {
+ display = DB_BYTE_DISPLAY;
+ }
+
+ /* Nasty little dump buffer routine! */
+
+ while (i < count) {
+
+ /* Print current offset */
+
+ acpi_ut_file_printf(file, "%6.4X: ", (base_offset + i));
+
+ /* Print 16 hex chars */
+
+ for (j = 0; j < 16;) {
+ if (i + j >= count) {
+
+ /* Dump fill spaces */
+
+ acpi_ut_file_printf(file, "%*s",
+ ((display * 2) + 1), " ");
+ j += display;
+ continue;
+ }
+
+ switch (display) {
+ case DB_BYTE_DISPLAY:
+ default: /* Default is BYTE display */
+
+ acpi_ut_file_printf(file, "%02X ",
+ buffer[(acpi_size) i + j]);
+ break;
+
+ case DB_WORD_DISPLAY:
+
+ ACPI_MOVE_16_TO_32(&temp32,
+ &buffer[(acpi_size) i + j]);
+ acpi_ut_file_printf(file, "%04X ", temp32);
+ break;
+
+ case DB_DWORD_DISPLAY:
+
+ ACPI_MOVE_32_TO_32(&temp32,
+ &buffer[(acpi_size) i + j]);
+ acpi_ut_file_printf(file, "%08X ", temp32);
+ break;
+
+ case DB_QWORD_DISPLAY:
+
+ ACPI_MOVE_32_TO_32(&temp32,
+ &buffer[(acpi_size) i + j]);
+ acpi_ut_file_printf(file, "%08X", temp32);
+
+ ACPI_MOVE_32_TO_32(&temp32,
+ &buffer[(acpi_size) i + j +
+ 4]);
+ acpi_ut_file_printf(file, "%08X ", temp32);
+ break;
+ }
+
+ j += display;
+ }
+
+ /*
+ * Print the ASCII equivalent characters but watch out for the bad
+ * unprintable ones (printable chars are 0x20 through 0x7E)
+ */
+ acpi_ut_file_printf(file, " ");
+ for (j = 0; j < 16; j++) {
+ if (i + j >= count) {
+ acpi_ut_file_printf(file, "\n");
+ return;
+ }
+
+ buf_char = buffer[(acpi_size) i + j];
+ if (ACPI_IS_PRINT(buf_char)) {
+ acpi_ut_file_printf(file, "%c", buf_char);
+ } else {
+ acpi_ut_file_printf(file, ".");
+ }
+ }
+
+ /* Done with that line. */
+
+ acpi_ut_file_printf(file, "\n");
+ i += 16;
+ }
+
+ return;
+}
+#endif
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 270c16464dd9..ff601c0f7c7a 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -1001,5 +1001,11 @@ acpi_ut_copy_iobject_to_iobject(union acpi_operand_object *source_desc,
status = acpi_ut_copy_simple_object(source_desc, *dest_desc);
}
+ /* Delete the allocated object if copy failed */
+
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_remove_reference(*dest_desc);
+ }
+
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index 21a20ac5b1e1..e516254c63b2 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -561,3 +561,29 @@ acpi_ut_ptr_exit(u32 line_number,
}
#endif
+
+#ifdef ACPI_APPLICATION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_log_error
+ *
+ * PARAMETERS: format - Printf format field
+ * ... - Optional printf arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Print error message to the console, used by applications.
+ *
+ ******************************************************************************/
+
+void ACPI_INTERNAL_VAR_XFACE acpi_log_error(const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+ (void)acpi_ut_file_vprintf(ACPI_FILE_ERR, format, args);
+ va_end(args);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_log_error)
+#endif
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 90ec37c473c6..40e923e675fc 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -88,33 +88,6 @@ const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES] = {
/*******************************************************************************
*
- * FUNCTION: acpi_ut_hex_to_ascii_char
- *
- * PARAMETERS: integer - Contains the hex digit
- * position - bit position of the digit within the
- * integer (multiple of 4)
- *
- * RETURN: The converted Ascii character
- *
- * DESCRIPTION: Convert a hex digit to an Ascii character
- *
- ******************************************************************************/
-
-/* Hex to ASCII conversion table */
-
-static const char acpi_gbl_hex_to_ascii[] = {
- '0', '1', '2', '3', '4', '5', '6', '7',
- '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
-};
-
-char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
-{
-
- return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ut_get_region_name
*
* PARAMETERS: Space ID - ID for the region
@@ -475,7 +448,8 @@ static const char *acpi_gbl_generic_notify[ACPI_NOTIFY_MAX + 1] = {
/* 09 */ "Device PLD Check",
/* 0A */ "Reserved",
/* 0B */ "System Locality Update",
- /* 0C */ "Shutdown Request"
+ /* 0C */ "Shutdown Request",
+ /* 0D */ "System Resource Affinity Update"
};
static const char *acpi_gbl_device_notify[4] = {
@@ -502,7 +476,7 @@ static const char *acpi_gbl_thermal_notify[4] = {
const char *acpi_ut_get_notify_name(u32 notify_value, acpi_object_type type)
{
- /* 00 - 0C are common to all object types */
+ /* 00 - 0D are common to all object types */
if (notify_value <= ACPI_NOTIFY_MAX) {
return (acpi_gbl_generic_notify[notify_value]);
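The hex conversion helper removed here moves to the new uthex.c; as a standalone rendering of the same table-lookup idea (names and example value invented): pick one 4-bit nibble out of a 64-bit integer and map it to its ASCII hex digit.

#include <stdint.h>
#include <stdio.h>

static const char hex_to_ascii[] = "0123456789ABCDEF";

static char nibble_to_ascii(uint64_t integer, unsigned int bit_position)
{
	/* bit_position is a multiple of 4: 0 = lowest nibble, 4 = next, ... */
	return hex_to_ascii[(integer >> bit_position) & 0xF];
}

int main(void)
{
	uint64_t value = 0x12AB;

	printf("%c%c%c%c\n",
	       nibble_to_ascii(value, 12),	/* '1' */
	       nibble_to_ascii(value, 8),	/* '2' */
	       nibble_to_ascii(value, 4),	/* 'A' */
	       nibble_to_ascii(value, 0));	/* 'B' */
	return 0;
}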
diff --git a/drivers/acpi/acpica/utfileio.c b/drivers/acpi/acpica/utfileio.c
new file mode 100644
index 000000000000..4e263a8cc6f0
--- /dev/null
+++ b/drivers/acpi/acpica/utfileio.c
@@ -0,0 +1,331 @@
+/*******************************************************************************
+ *
+ * Module Name: utfileio - simple file I/O routines
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "actables.h"
+#include "acapps.h"
+
+#ifdef ACPI_ASL_COMPILER
+#include "aslcompiler.h"
+#endif
+
+#define _COMPONENT ACPI_CA_DEBUGGER
+ACPI_MODULE_NAME("utfileio")
+
+#ifdef ACPI_APPLICATION
+/* Local prototypes */
+static acpi_status
+acpi_ut_check_text_mode_corruption(u8 *table,
+ u32 table_length, u32 file_length);
+
+static acpi_status
+acpi_ut_read_table(FILE * fp,
+ struct acpi_table_header **table, u32 *table_length);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_check_text_mode_corruption
+ *
+ * PARAMETERS: table - Table buffer
+ * table_length - Length of table from the table header
+ * file_length - Length of the file that contains the table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Check table for text mode file corruption where all linefeed
+ * characters (LF) have been replaced by carriage return linefeed
+ * pairs (CR/LF).
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_check_text_mode_corruption(u8 *table, u32 table_length, u32 file_length)
+{
+ u32 i;
+ u32 pairs = 0;
+
+ if (table_length != file_length) {
+ ACPI_WARNING((AE_INFO,
+ "File length (0x%X) is not the same as the table length (0x%X)",
+ file_length, table_length));
+ }
+
+ /* Scan entire table to determine if each LF has been prefixed with a CR */
+
+ for (i = 1; i < file_length; i++) {
+ if (table[i] == 0x0A) {
+ if (table[i - 1] != 0x0D) {
+
+ /* The LF does not have a preceding CR, table not corrupted */
+
+ return (AE_OK);
+ } else {
+ /* Found a CR/LF pair */
+
+ pairs++;
+ }
+ i++;
+ }
+ }
+
+ if (!pairs) {
+ return (AE_OK);
+ }
+
+ /*
+ * Entire table scanned, each CR is part of a CR/LF pair --
+ * meaning that the table was treated as a text file somewhere.
+ *
+ * NOTE: We can't "fix" the table, because any existing CR/LF pairs in the
+ * original table are left untouched by the text conversion process --
+ * meaning that we cannot simply replace CR/LF pairs with LFs.
+ */
+ acpi_os_printf("Table has been corrupted by text mode conversion\n");
+ acpi_os_printf("All LFs (%u) were changed to CR/LF pairs\n", pairs);
+ acpi_os_printf("Table cannot be repaired!\n");
+ return (AE_BAD_VALUE);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_read_table
+ *
+ * PARAMETERS: fp - File that contains table
+ * table - Return value, buffer with table
+ * table_length - Return value, length of table
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Load the DSDT from the file pointer
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_read_table(FILE * fp,
+ struct acpi_table_header **table, u32 *table_length)
+{
+ struct acpi_table_header table_header;
+ u32 actual;
+ acpi_status status;
+ u32 file_size;
+ u8 standard_header = TRUE;
+ s32 count;
+
+ /* Get the file size */
+
+ file_size = cm_get_file_size(fp);
+ if (file_size == ACPI_UINT32_MAX) {
+ return (AE_ERROR);
+ }
+
+ if (file_size < 4) {
+ return (AE_BAD_HEADER);
+ }
+
+ /* Read the signature */
+
+ fseek(fp, 0, SEEK_SET);
+
+ count = fread(&table_header, 1, sizeof(struct acpi_table_header), fp);
+ if (count != sizeof(struct acpi_table_header)) {
+ acpi_os_printf("Could not read the table header\n");
+ return (AE_BAD_HEADER);
+ }
+
+ /* The RSDP table does not have standard ACPI header */
+
+ if (ACPI_VALIDATE_RSDP_SIG(table_header.signature)) {
+ *table_length = file_size;
+ standard_header = FALSE;
+ } else {
+
+#if 0
+ /* Validate the table header/length */
+
+ status = acpi_tb_validate_table_header(&table_header);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Table header is invalid!\n");
+ return (status);
+ }
+#endif
+
+ /* File size must be at least as long as the Header-specified length */
+
+ if (table_header.length > file_size) {
+ acpi_os_printf
+ ("TableHeader length [0x%X] greater than the input file size [0x%X]\n",
+ table_header.length, file_size);
+
+#ifdef ACPI_ASL_COMPILER
+ status = fl_check_for_ascii(fp, NULL, FALSE);
+ if (ACPI_SUCCESS(status)) {
+ acpi_os_printf
+ ("File appears to be ASCII only, must be binary\n");
+ }
+#endif
+ return (AE_BAD_HEADER);
+ }
+#ifdef ACPI_OBSOLETE_CODE
+ /* We only support a limited number of table types */
+
+ if (!ACPI_COMPARE_NAME
+ ((char *)table_header.signature, ACPI_SIG_DSDT)
+ && !ACPI_COMPARE_NAME((char *)table_header.signature,
+ ACPI_SIG_PSDT)
+ && !ACPI_COMPARE_NAME((char *)table_header.signature,
+ ACPI_SIG_SSDT)) {
+ acpi_os_printf
+ ("Table signature [%4.4s] is invalid or not supported\n",
+ (char *)table_header.signature);
+ ACPI_DUMP_BUFFER(&table_header,
+ sizeof(struct acpi_table_header));
+ return (AE_ERROR);
+ }
+#endif
+
+ *table_length = table_header.length;
+ }
+
+ /* Allocate a buffer for the table */
+
+ *table = acpi_os_allocate((size_t) file_size);
+ if (!*table) {
+ acpi_os_printf
+ ("Could not allocate memory for ACPI table %4.4s (size=0x%X)\n",
+ table_header.signature, *table_length);
+ return (AE_NO_MEMORY);
+ }
+
+ /* Get the rest of the table */
+
+ fseek(fp, 0, SEEK_SET);
+ actual = fread(*table, 1, (size_t) file_size, fp);
+ if (actual == file_size) {
+ if (standard_header) {
+
+ /* Now validate the checksum */
+
+ status = acpi_tb_verify_checksum((void *)*table,
+ ACPI_CAST_PTR(struct
+ acpi_table_header,
+ *table)->
+ length);
+
+ if (status == AE_BAD_CHECKSUM) {
+ status =
+ acpi_ut_check_text_mode_corruption((u8 *)
+ *table,
+ file_size,
+ (*table)->
+ length);
+ return (status);
+ }
+ }
+ return (AE_OK);
+ }
+
+ if (actual > 0) {
+ acpi_os_printf("Warning - reading table, asked for %X got %X\n",
+ file_size, actual);
+ return (AE_OK);
+ }
+
+ acpi_os_printf("Error - could not read the table file\n");
+ acpi_os_free(*table);
+ *table = NULL;
+ *table_length = 0;
+ return (AE_ERROR);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_read_table_from_file
+ *
+ * PARAMETERS: filename - File where table is located
+ * table - Where a pointer to the table is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get an ACPI table from a file
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_read_table_from_file(char *filename, struct acpi_table_header ** table)
+{
+ FILE *file;
+ u32 file_size;
+ u32 table_length;
+ acpi_status status = AE_ERROR;
+
+ /* Open the file, get current size */
+
+ file = fopen(filename, "rb");
+ if (!file) {
+ perror("Could not open input file");
+ return (status);
+ }
+
+ file_size = cm_get_file_size(file);
+ if (file_size == ACPI_UINT32_MAX) {
+ goto exit;
+ }
+
+ /* Get the entire file */
+
+ fprintf(stderr,
+ "Loading Acpi table from file %10s - Length %.8u (%06X)\n",
+ filename, file_size, file_size);
+
+ status = acpi_ut_read_table(file, table, &table_length);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf("Could not get table from the file\n");
+ }
+
+exit:
+ fclose(file);
+ return (status);
+}
+
+#endif
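A standalone sketch of the text-mode corruption test the new utfileio.c performs (simplified, with invented names): if every LF in the file is preceded by a CR, the binary table has almost certainly been through a text-mode conversion and is reported as unrepairable.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool looks_text_converted(const unsigned char *buf, size_t len)
{
	size_t i, pairs = 0;

	for (i = 1; i < len; i++) {
		if (buf[i] != '\n')
			continue;
		if (buf[i - 1] != '\r')
			return false;	/* a bare LF: table is intact */
		pairs++;
	}
	return pairs != 0;		/* only CR/LF pairs were found */
}

int main(void)
{
	const unsigned char good[] = { 0x10, '\n', 0x20 };
	const unsigned char bad[]  = { 0x10, '\r', '\n', 0x20, '\r', '\n' };

	printf("%d %d\n", looks_text_converted(good, sizeof(good)),
	       looks_text_converted(bad, sizeof(bad)));	/* prints "0 1" */
	return 0;
}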
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index d69be3cb3fae..77ceac715f28 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -214,152 +214,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
};
#endif /* !ACPI_REDUCED_HARDWARE */
-/*******************************************************************************
- *
- * FUNCTION: acpi_ut_init_globals
- *
- * PARAMETERS: None
- *
- * RETURN: Status
- *
- * DESCRIPTION: Initialize ACPICA globals. All globals that require specific
- * initialization should be initialized here. This allows for
- * a warm restart.
- *
- ******************************************************************************/
-
-acpi_status acpi_ut_init_globals(void)
-{
- acpi_status status;
- u32 i;
-
- ACPI_FUNCTION_TRACE(ut_init_globals);
-
- /* Create all memory caches */
-
- status = acpi_ut_create_caches();
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /* Address Range lists */
-
- for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
- acpi_gbl_address_range_list[i] = NULL;
- }
-
- /* Mutex locked flags */
-
- for (i = 0; i < ACPI_NUM_MUTEX; i++) {
- acpi_gbl_mutex_info[i].mutex = NULL;
- acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
- acpi_gbl_mutex_info[i].use_count = 0;
- }
-
- for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
- acpi_gbl_owner_id_mask[i] = 0;
- }
-
- /* Last owner_ID is never valid */
-
- acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
-
- /* Event counters */
-
- acpi_method_count = 0;
- acpi_sci_count = 0;
- acpi_gpe_count = 0;
-
- for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
- acpi_fixed_event_count[i] = 0;
- }
-
-#if (!ACPI_REDUCED_HARDWARE)
-
- /* GPE/SCI support */
-
- acpi_gbl_all_gpes_initialized = FALSE;
- acpi_gbl_gpe_xrupt_list_head = NULL;
- acpi_gbl_gpe_fadt_blocks[0] = NULL;
- acpi_gbl_gpe_fadt_blocks[1] = NULL;
- acpi_current_gpe_count = 0;
-
- acpi_gbl_global_event_handler = NULL;
- acpi_gbl_sci_handler_list = NULL;
-
-#endif /* !ACPI_REDUCED_HARDWARE */
-
- /* Global handlers */
-
- acpi_gbl_global_notify[0].handler = NULL;
- acpi_gbl_global_notify[1].handler = NULL;
- acpi_gbl_exception_handler = NULL;
- acpi_gbl_init_handler = NULL;
- acpi_gbl_table_handler = NULL;
- acpi_gbl_interface_handler = NULL;
-
- /* Global Lock support */
-
- acpi_gbl_global_lock_semaphore = NULL;
- acpi_gbl_global_lock_mutex = NULL;
- acpi_gbl_global_lock_acquired = FALSE;
- acpi_gbl_global_lock_handle = 0;
- acpi_gbl_global_lock_present = FALSE;
-
- /* Miscellaneous variables */
-
- acpi_gbl_DSDT = NULL;
- acpi_gbl_cm_single_step = FALSE;
- acpi_gbl_shutdown = FALSE;
- acpi_gbl_ns_lookup_count = 0;
- acpi_gbl_ps_find_count = 0;
- acpi_gbl_acpi_hardware_present = TRUE;
- acpi_gbl_last_owner_id_index = 0;
- acpi_gbl_next_owner_id_offset = 0;
- acpi_gbl_trace_dbg_level = 0;
- acpi_gbl_trace_dbg_layer = 0;
- acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
- acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
- acpi_gbl_osi_mutex = NULL;
- acpi_gbl_reg_methods_executed = FALSE;
-
- /* Hardware oriented */
-
- acpi_gbl_events_initialized = FALSE;
- acpi_gbl_system_awake_and_running = TRUE;
-
- /* Namespace */
-
- acpi_gbl_module_code_list = NULL;
- acpi_gbl_root_node = NULL;
- acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
- acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
- acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
- acpi_gbl_root_node_struct.parent = NULL;
- acpi_gbl_root_node_struct.child = NULL;
- acpi_gbl_root_node_struct.peer = NULL;
- acpi_gbl_root_node_struct.object = NULL;
-
-#ifdef ACPI_DISASSEMBLER
- acpi_gbl_external_list = NULL;
- acpi_gbl_num_external_methods = 0;
- acpi_gbl_resolved_external_methods = 0;
-#endif
-
-#ifdef ACPI_DEBUG_OUTPUT
- acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
-#endif
-
-#ifdef ACPI_DBG_TRACK_ALLOCATIONS
- acpi_gbl_display_final_mem_stats = FALSE;
- acpi_gbl_disable_mem_tracking = FALSE;
-#endif
-
- ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = FALSE);
-
- return_ACPI_STATUS(AE_OK);
-}
-
/* Public globals */
ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
new file mode 100644
index 000000000000..9afa9441b183
--- /dev/null
+++ b/drivers/acpi/acpica/uthex.c
@@ -0,0 +1,100 @@
+/******************************************************************************
+ *
+ * Module Name: uthex -- Hex/ASCII support functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_COMPILER
+ACPI_MODULE_NAME("uthex")
+
+/* Hex to ASCII conversion table */
+static char acpi_gbl_hex_to_ascii[] = {
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D',
+ 'E', 'F'
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_hex_to_ascii_char
+ *
+ * PARAMETERS: integer - Contains the hex digit
+ * position - bit position of the digit within the
+ * integer (multiple of 4)
+ *
+ * RETURN: The converted Ascii character
+ *
+ * DESCRIPTION: Convert a hex digit to an Ascii character
+ *
+ ******************************************************************************/
+
+char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
+{
+
+ return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_ascii_char_to_hex
+ *
+ * PARAMETERS: hex_char - Hex character in Ascii
+ *
+ * RETURN: The binary value of the ascii/hex character
+ *
+ * DESCRIPTION: Perform ascii-to-hex translation
+ *
+ ******************************************************************************/
+
+u8 acpi_ut_ascii_char_to_hex(int hex_char)
+{
+
+ if (hex_char <= 0x39) {
+ return ((u8)(hex_char - 0x30));
+ }
+
+ if (hex_char <= 0x46) {
+ return ((u8)(hex_char - 0x37));
+ }
+
+ return ((u8)(hex_char - 0x57));
+}
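The two helpers above are small, but their calling conventions are easy to get wrong: position is a bit offset (a multiple of 4), not a nibble index, and the ASCII-to-hex routine assumes it is handed a valid hex character. A minimal standalone sketch with standard C types (the *_demo names are illustrative, not part of the kernel API):

/* Illustrative sketch only; mirrors the logic of uthex.c above. */
#include <stdio.h>
#include <stdint.h>

static const char hex_to_ascii_demo[] = "0123456789ABCDEF";

/* 'position' is a bit offset (multiple of 4), as in acpi_ut_hex_to_ascii_char() */
static char hex_to_ascii_char_demo(uint64_t integer, uint32_t position)
{
        return hex_to_ascii_demo[(integer >> position) & 0xF];
}

/* Accepts '0'-'9', 'A'-'F' and 'a'-'f', as in acpi_ut_ascii_char_to_hex() */
static uint8_t ascii_char_to_hex_demo(int c)
{
        if (c <= '9')
                return (uint8_t)(c - '0');
        if (c <= 'F')
                return (uint8_t)(c - 'A' + 10);
        return (uint8_t)(c - 'a' + 10);
}

int main(void)
{
        uint64_t value = 0x1234ABCD;
        uint32_t pos;

        /* Print the value nibble by nibble, most significant first */
        for (pos = 28; ; pos -= 4) {
                putchar(hex_to_ascii_char_demo(value, pos));
                if (pos == 0)
                        break;
        }
        putchar('\n');                                  /* prints 1234ABCD */

        printf("%u\n", ascii_char_to_hex_demo('C'));    /* prints 12 */
        return 0;
}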
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 5f56fc49021e..77120ec9ea86 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -102,6 +102,151 @@ static void acpi_ut_free_gpe_lists(void)
}
#endif /* !ACPI_REDUCED_HARDWARE */
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_init_globals
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Initialize ACPICA globals. All globals that require specific
+ * initialization should be initialized here. This allows for
+ * a warm restart.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_init_globals(void)
+{
+ acpi_status status;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ut_init_globals);
+
+ /* Create all memory caches */
+
+ status = acpi_ut_create_caches();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Address Range lists */
+
+ for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+ acpi_gbl_address_range_list[i] = NULL;
+ }
+
+ /* Mutex locked flags */
+
+ for (i = 0; i < ACPI_NUM_MUTEX; i++) {
+ acpi_gbl_mutex_info[i].mutex = NULL;
+ acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+ acpi_gbl_mutex_info[i].use_count = 0;
+ }
+
+ for (i = 0; i < ACPI_NUM_OWNERID_MASKS; i++) {
+ acpi_gbl_owner_id_mask[i] = 0;
+ }
+
+ /* Last owner_ID is never valid */
+
+ acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS - 1] = 0x80000000;
+
+ /* Event counters */
+
+ acpi_method_count = 0;
+ acpi_sci_count = 0;
+ acpi_gpe_count = 0;
+
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+ acpi_fixed_event_count[i] = 0;
+ }
+
+#if (!ACPI_REDUCED_HARDWARE)
+
+ /* GPE/SCI support */
+
+ acpi_gbl_all_gpes_initialized = FALSE;
+ acpi_gbl_gpe_xrupt_list_head = NULL;
+ acpi_gbl_gpe_fadt_blocks[0] = NULL;
+ acpi_gbl_gpe_fadt_blocks[1] = NULL;
+ acpi_current_gpe_count = 0;
+
+ acpi_gbl_global_event_handler = NULL;
+ acpi_gbl_sci_handler_list = NULL;
+
+#endif /* !ACPI_REDUCED_HARDWARE */
+
+ /* Global handlers */
+
+ acpi_gbl_global_notify[0].handler = NULL;
+ acpi_gbl_global_notify[1].handler = NULL;
+ acpi_gbl_exception_handler = NULL;
+ acpi_gbl_init_handler = NULL;
+ acpi_gbl_table_handler = NULL;
+ acpi_gbl_interface_handler = NULL;
+
+ /* Global Lock support */
+
+ acpi_gbl_global_lock_semaphore = NULL;
+ acpi_gbl_global_lock_mutex = NULL;
+ acpi_gbl_global_lock_acquired = FALSE;
+ acpi_gbl_global_lock_handle = 0;
+ acpi_gbl_global_lock_present = FALSE;
+
+ /* Miscellaneous variables */
+
+ acpi_gbl_DSDT = NULL;
+ acpi_gbl_cm_single_step = FALSE;
+ acpi_gbl_shutdown = FALSE;
+ acpi_gbl_ns_lookup_count = 0;
+ acpi_gbl_ps_find_count = 0;
+ acpi_gbl_acpi_hardware_present = TRUE;
+ acpi_gbl_last_owner_id_index = 0;
+ acpi_gbl_next_owner_id_offset = 0;
+ acpi_gbl_trace_dbg_level = 0;
+ acpi_gbl_trace_dbg_layer = 0;
+ acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
+ acpi_gbl_osi_mutex = NULL;
+ acpi_gbl_reg_methods_executed = FALSE;
+
+ /* Hardware oriented */
+
+ acpi_gbl_events_initialized = FALSE;
+ acpi_gbl_system_awake_and_running = TRUE;
+
+ /* Namespace */
+
+ acpi_gbl_module_code_list = NULL;
+ acpi_gbl_root_node = NULL;
+ acpi_gbl_root_node_struct.name.integer = ACPI_ROOT_NAME;
+ acpi_gbl_root_node_struct.descriptor_type = ACPI_DESC_TYPE_NAMED;
+ acpi_gbl_root_node_struct.type = ACPI_TYPE_DEVICE;
+ acpi_gbl_root_node_struct.parent = NULL;
+ acpi_gbl_root_node_struct.child = NULL;
+ acpi_gbl_root_node_struct.peer = NULL;
+ acpi_gbl_root_node_struct.object = NULL;
+
+#ifdef ACPI_DISASSEMBLER
+ acpi_gbl_external_list = NULL;
+ acpi_gbl_num_external_methods = 0;
+ acpi_gbl_resolved_external_methods = 0;
+#endif
+
+#ifdef ACPI_DEBUG_OUTPUT
+ acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
+#endif
+
+#ifdef ACPI_DBG_TRACK_ALLOCATIONS
+ acpi_gbl_display_final_mem_stats = FALSE;
+ acpi_gbl_disable_mem_tracking = FALSE;
+#endif
+
+ ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = FALSE);
+
+ return_ACPI_STATUS(AE_OK);
+}
+
/******************************************************************************
*
* FUNCTION: acpi_ut_terminate
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
new file mode 100644
index 000000000000..0ce3f5a0dd67
--- /dev/null
+++ b/drivers/acpi/acpica/utprint.c
@@ -0,0 +1,664 @@
+/******************************************************************************
+ *
+ * Module Name: utprint - Formatted printing routines
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utprint")
+
+#define ACPI_FORMAT_SIGN 0x01
+#define ACPI_FORMAT_SIGN_PLUS 0x02
+#define ACPI_FORMAT_SIGN_PLUS_SPACE 0x04
+#define ACPI_FORMAT_ZERO 0x08
+#define ACPI_FORMAT_LEFT 0x10
+#define ACPI_FORMAT_UPPER 0x20
+#define ACPI_FORMAT_PREFIX 0x40
+/* Local prototypes */
+static acpi_size
+acpi_ut_bound_string_length(const char *string, acpi_size count);
+
+static char *acpi_ut_bound_string_output(char *string, const char *end, char c);
+
+static char *acpi_ut_format_number(char *string,
+ char *end,
+ u64 number,
+ u8 base, s32 width, s32 precision, u8 type);
+
+static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper);
+
+/* Module globals */
+
+static const char acpi_gbl_lower_hex_digits[] = "0123456789abcdef";
+static const char acpi_gbl_upper_hex_digits[] = "0123456789ABCDEF";
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_bound_string_length
+ *
+ * PARAMETERS: string - String with boundary
+ * count - Boundary of the string
+ *
+ * RETURN: Length of the string. Less than or equal to Count.
+ *
+ * DESCRIPTION: Calculate the length of a string with boundary.
+ *
+ ******************************************************************************/
+
+static acpi_size
+acpi_ut_bound_string_length(const char *string, acpi_size count)
+{
+ u32 length = 0;
+
+ while (*string && count) {
+ length++;
+ string++;
+ count--;
+ }
+
+ return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_bound_string_output
+ *
+ * PARAMETERS: string - String with boundary
+ * end - Boundary of the string
+ * c - Character to be output to the string
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Output a character into a string with boundary check.
+ *
+ ******************************************************************************/
+
+static char *acpi_ut_bound_string_output(char *string, const char *end, char c)
+{
+
+ if (string < end) {
+ *string = c;
+ }
+
+ ++string;
+ return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_put_number
+ *
+ * PARAMETERS: string - Buffer to hold reverse-ordered string
+ * number - Integer to be converted
+ * base - Base of the integer
+ * upper - Whether or not using upper cased digits
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Convert an integer into a string. Note that the string holds
+ * the digits in reverse order and is not null terminated.
+ *
+ ******************************************************************************/
+
+static char *acpi_ut_put_number(char *string, u64 number, u8 base, u8 upper)
+{
+ const char *digits;
+ u64 digit_index;
+ char *pos;
+
+ pos = string;
+ digits = upper ? acpi_gbl_upper_hex_digits : acpi_gbl_lower_hex_digits;
+
+ if (number == 0) {
+ *(pos++) = '0';
+ } else {
+ while (number) {
+ (void)acpi_ut_divide(number, base, &number,
+ &digit_index);
+ *(pos++) = digits[digit_index];
+ }
+ }
+
+ /* *(Pos++) = '0'; */
+ return (pos);
+}
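acpi_ut_put_number() emits the digits least significant first and adds no terminator; acpi_ut_print_number() and acpi_ut_format_number() then copy the buffer back to front. A standalone sketch of that reverse-then-copy idea, using standard C types (the _demo names are illustrative, not kernel API):

#include <stdio.h>
#include <stdint.h>

/* Emit digits in reverse order, as acpi_ut_put_number() does */
static char *put_number_demo(char *pos, uint64_t number, unsigned int base)
{
        static const char digits[] = "0123456789abcdef";

        if (number == 0) {
                *pos++ = '0';
        } else {
                while (number) {
                        *pos++ = digits[number % base];
                        number /= base;
                }
        }
        return pos;             /* no trailing '\0', like the ACPICA routine */
}

int main(void)
{
        char reversed[24], out[24];
        char *end = put_number_demo(reversed, 48879, 16);
        char *dst = out;

        while (end != reversed) /* copy back to front, like acpi_ut_print_number() */
                *dst++ = *--end;
        *dst = '\0';

        puts(out);              /* prints "beef" */
        return 0;
}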
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_scan_number
+ *
+ * PARAMETERS: string - String buffer
+ * number_ptr - Where the number is returned
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Scan a string for a decimal integer.
+ *
+ ******************************************************************************/
+
+const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
+{
+ u64 number = 0;
+
+ while (ACPI_IS_DIGIT(*string)) {
+ number *= 10;
+ number += *(string++) - '0';
+ }
+
+ *number_ptr = number;
+ return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_print_number
+ *
+ * PARAMETERS: string - String buffer
+ * number - The number to be converted
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Print a decimal integer into a string.
+ *
+ ******************************************************************************/
+
+const char *acpi_ut_print_number(char *string, u64 number)
+{
+ char ascii_string[20];
+ const char *pos1;
+ char *pos2;
+
+ pos1 = acpi_ut_put_number(ascii_string, number, 10, FALSE);
+ pos2 = string;
+
+ while (pos1 != ascii_string) {
+ *(pos2++) = *(--pos1);
+ }
+
+ *pos2 = 0;
+ return (string);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_format_number
+ *
+ * PARAMETERS: string - String buffer with boundary
+ * end - Boundary of the string
+ * number - The number to be converted
+ * base - Base of the integer
+ * width - Field width
+ * precision - Precision of the integer
+ * type - Special printing flags
+ *
+ * RETURN: Updated position for next valid character
+ *
+ * DESCRIPTION: Print an integer into a string with any base and any precision.
+ *
+ ******************************************************************************/
+
+static char *acpi_ut_format_number(char *string,
+ char *end,
+ u64 number,
+ u8 base, s32 width, s32 precision, u8 type)
+{
+ char *pos;
+ char sign;
+ char zero;
+ u8 need_prefix;
+ u8 upper;
+ s32 i;
+ char reversed_string[66];
+
+ /* Parameter validation */
+
+ if (base < 2 || base > 16) {
+ return (NULL);
+ }
+
+ if (type & ACPI_FORMAT_LEFT) {
+ type &= ~ACPI_FORMAT_ZERO;
+ }
+
+ need_prefix = ((type & ACPI_FORMAT_PREFIX)
+ && base != 10) ? TRUE : FALSE;
+ upper = (type & ACPI_FORMAT_UPPER) ? TRUE : FALSE;
+ zero = (type & ACPI_FORMAT_ZERO) ? '0' : ' ';
+
+ /* Calculate size according to sign and prefix */
+
+ sign = '\0';
+ if (type & ACPI_FORMAT_SIGN) {
+ if ((s64) number < 0) {
+ sign = '-';
+ number = -(s64) number;
+ width--;
+ } else if (type & ACPI_FORMAT_SIGN_PLUS) {
+ sign = '+';
+ width--;
+ } else if (type & ACPI_FORMAT_SIGN_PLUS_SPACE) {
+ sign = ' ';
+ width--;
+ }
+ }
+ if (need_prefix) {
+ width--;
+ if (base == 16) {
+ width--;
+ }
+ }
+
+ /* Generate full string in reverse order */
+
+ pos = acpi_ut_put_number(reversed_string, number, base, upper);
+ i = ACPI_PTR_DIFF(pos, reversed_string);
+
+ /* Printing 100 using %2d gives "100", not "00" */
+
+ if (i > precision) {
+ precision = i;
+ }
+
+ width -= precision;
+
+ /* Output the string */
+
+ if (!(type & (ACPI_FORMAT_ZERO | ACPI_FORMAT_LEFT))) {
+ while (--width >= 0) {
+ string = acpi_ut_bound_string_output(string, end, ' ');
+ }
+ }
+ if (sign) {
+ string = acpi_ut_bound_string_output(string, end, sign);
+ }
+ if (need_prefix) {
+ string = acpi_ut_bound_string_output(string, end, '0');
+ if (base == 16) {
+ string = acpi_ut_bound_string_output(string, end,
+ upper ? 'X' : 'x');
+ }
+ }
+ if (!(type & ACPI_FORMAT_LEFT)) {
+ while (--width >= 0) {
+ string = acpi_ut_bound_string_output(string, end, zero);
+ }
+ }
+
+ while (i <= --precision) {
+ string = acpi_ut_bound_string_output(string, end, '0');
+ }
+ while (--i >= 0) {
+ string = acpi_ut_bound_string_output(string, end,
+ reversed_string[i]);
+ }
+ while (--width >= 0) {
+ string = acpi_ut_bound_string_output(string, end, ' ');
+ }
+
+ return (string);
+}
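The flag handling above mirrors standard printf integer conversions: '-' clears zero padding, '#' adds the 0/0x prefix, and an explicit precision takes the place of zero padding. Since the routine is intended to reproduce those semantics, a short standard-C comparison (a sketch, not an exhaustive test) shows the outputs acpi_ut_format_number() is expected to match:

#include <stdio.h>

int main(void)
{
        /* Same flag semantics the ACPICA routine reimplements */
        printf("[%8x]\n", 0xbeef);      /* [    beef]   field width, space padded */
        printf("[%08x]\n", 0xbeef);     /* [0000beef]   ACPI_FORMAT_ZERO          */
        printf("[%-8x]\n", 0xbeef);     /* [beef    ]   ACPI_FORMAT_LEFT          */
        printf("[%#10x]\n", 0xbeef);    /* [    0xbeef] ACPI_FORMAT_PREFIX        */
        printf("[%.6x]\n", 0xbeef);     /* [00beef]     precision                 */
        printf("[%+d]\n", 48879);       /* [+48879]     ACPI_FORMAT_SIGN_PLUS     */
        return 0;
}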
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_vsnprintf
+ *
+ * PARAMETERS: string - String with boundary
+ * size - Boundary of the string
+ * format - Standard printf format
+ * args - Argument list
+ *
+ * RETURN: Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a string using argument list pointer.
+ *
+ ******************************************************************************/
+
+int
+acpi_ut_vsnprintf(char *string,
+ acpi_size size, const char *format, va_list args)
+{
+ u8 base = 10;
+ u8 type = 0;
+ s32 width = -1;
+ s32 precision = -1;
+ char qualifier = 0;
+ u64 number;
+ char *pos;
+ char *end;
+ char c;
+ const char *s;
+ const void *p;
+ s32 length;
+ int i;
+
+ pos = string;
+ end = string + size;
+
+ for (; *format; ++format) {
+ if (*format != '%') {
+ pos = acpi_ut_bound_string_output(pos, end, *format);
+ continue;
+ }
+
+ /* Process sign */
+
+ do {
+ ++format;
+ if (*format == '#') {
+ type |= ACPI_FORMAT_PREFIX;
+ } else if (*format == '0') {
+ type |= ACPI_FORMAT_ZERO;
+ } else if (*format == '+') {
+ type |= ACPI_FORMAT_SIGN_PLUS;
+ } else if (*format == ' ') {
+ type |= ACPI_FORMAT_SIGN_PLUS_SPACE;
+ } else if (*format == '-') {
+ type |= ACPI_FORMAT_LEFT;
+ } else {
+ break;
+ }
+ } while (1);
+
+ /* Process width */
+
+ width = -1;
+ if (ACPI_IS_DIGIT(*format)) {
+ format = acpi_ut_scan_number(format, &number);
+ width = (s32) number;
+ } else if (*format == '*') {
+ ++format;
+ width = va_arg(args, int);
+ if (width < 0) {
+ width = -width;
+ type |= ACPI_FORMAT_LEFT;
+ }
+ }
+
+ /* Process precision */
+
+ precision = -1;
+ if (*format == '.') {
+ ++format;
+ if (ACPI_IS_DIGIT(*format)) {
+ format = acpi_ut_scan_number(format, &number);
+ precision = (s32) number;
+ } else if (*format == '*') {
+ ++format;
+ precision = va_arg(args, int);
+ }
+ if (precision < 0) {
+ precision = 0;
+ }
+ }
+
+ /* Process qualifier */
+
+ qualifier = -1;
+ if (*format == 'h' || *format == 'l' || *format == 'L') {
+ qualifier = *format;
+ ++format;
+
+ if (qualifier == 'l' && *format == 'l') {
+ qualifier = 'L';
+ ++format;
+ }
+ }
+
+ switch (*format) {
+ case '%':
+
+ pos = acpi_ut_bound_string_output(pos, end, '%');
+ continue;
+
+ case 'c':
+
+ if (!(type & ACPI_FORMAT_LEFT)) {
+ while (--width > 0) {
+ pos =
+ acpi_ut_bound_string_output(pos,
+ end,
+ ' ');
+ }
+ }
+
+ c = (char)va_arg(args, int);
+ pos = acpi_ut_bound_string_output(pos, end, c);
+
+ while (--width > 0) {
+ pos =
+ acpi_ut_bound_string_output(pos, end, ' ');
+ }
+ continue;
+
+ case 's':
+
+ s = va_arg(args, char *);
+ if (!s) {
+ s = "<NULL>";
+ }
+ length = acpi_ut_bound_string_length(s, precision);
+ if (!(type & ACPI_FORMAT_LEFT)) {
+ while (length < width--) {
+ pos =
+ acpi_ut_bound_string_output(pos,
+ end,
+ ' ');
+ }
+ }
+ for (i = 0; i < length; ++i) {
+ pos = acpi_ut_bound_string_output(pos, end, *s);
+ ++s;
+ }
+ while (length < width--) {
+ pos =
+ acpi_ut_bound_string_output(pos, end, ' ');
+ }
+ continue;
+
+ case 'o':
+
+ base = 8;
+ break;
+
+ case 'X':
+
+ type |= ACPI_FORMAT_UPPER;
+
+ case 'x':
+
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+
+ type |= ACPI_FORMAT_SIGN;
+
+ case 'u':
+
+ break;
+
+ case 'p':
+
+ if (width == -1) {
+ width = 2 * sizeof(void *);
+ type |= ACPI_FORMAT_ZERO;
+ }
+
+ p = va_arg(args, void *);
+ pos = acpi_ut_format_number(pos, end,
+ ACPI_TO_INTEGER(p), 16,
+ width, precision, type);
+ continue;
+
+ default:
+
+ pos = acpi_ut_bound_string_output(pos, end, '%');
+ if (*format) {
+ pos =
+ acpi_ut_bound_string_output(pos, end,
+ *format);
+ } else {
+ --format;
+ }
+ continue;
+ }
+
+ if (qualifier == 'L') {
+ number = va_arg(args, u64);
+ if (type & ACPI_FORMAT_SIGN) {
+ number = (s64) number;
+ }
+ } else if (qualifier == 'l') {
+ number = va_arg(args, unsigned long);
+ if (type & ACPI_FORMAT_SIGN) {
+ number = (s32) number;
+ }
+ } else if (qualifier == 'h') {
+ number = (u16)va_arg(args, int);
+ if (type & ACPI_FORMAT_SIGN) {
+ number = (s16) number;
+ }
+ } else {
+ number = va_arg(args, unsigned int);
+ if (type & ACPI_FORMAT_SIGN) {
+ number = (signed int)number;
+ }
+ }
+
+ pos = acpi_ut_format_number(pos, end, number, base,
+ width, precision, type);
+ }
+
+ if (size > 0) {
+ if (pos < end) {
+ *pos = '\0';
+ } else {
+ end[-1] = '\0';
+ }
+ }
+
+ return (ACPI_PTR_DIFF(pos, string));
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_snprintf
+ *
+ * PARAMETERS: string - String with boundary
+ * size - Boundary of the string
+ * Format, ... - Standard printf format
+ *
+ * RETURN: Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a string.
+ *
+ ******************************************************************************/
+
+int acpi_ut_snprintf(char *string, acpi_size size, const char *format, ...)
+{
+ va_list args;
+ int length;
+
+ va_start(args, format);
+ length = acpi_ut_vsnprintf(string, size, format, args);
+ va_end(args);
+
+ return (length);
+}
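A hedged usage sketch of the new internal formatter. Judging from acpi_ut_bound_string_output(), which keeps advancing the position even past the end boundary, the return value is the length the output would have had without truncation (C99 snprintf style), so a caller that needs the stored length should clamp it against the buffer size. The demo function below is illustrative only:

static int utprint_usage_demo(void)
{
        char buf[16];
        int len;

        len = acpi_ut_snprintf(buf, sizeof(buf), "%s=%#010x", "GPE", 0x42);
        /* buf now holds "GPE=0x00000042", len == 14 */

        len = acpi_ut_snprintf(buf, sizeof(buf), "%s", "a much longer message");

        /* Truncated: only sizeof(buf) - 1 characters plus '\0' were stored */
        return (len >= (int)sizeof(buf)) ? -1 : len;
}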
+
+#ifdef ACPI_APPLICATION
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_file_vprintf
+ *
+ * PARAMETERS: file - File descriptor
+ * format - Standard printf format
+ * args - Argument list
+ *
+ * RETURN: Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a file using argument list pointer.
+ *
+ ******************************************************************************/
+
+int acpi_ut_file_vprintf(ACPI_FILE file, const char *format, va_list args)
+{
+ acpi_cpu_flags flags;
+ int length;
+
+ flags = acpi_os_acquire_lock(acpi_gbl_print_lock);
+ length = acpi_ut_vsnprintf(acpi_gbl_print_buffer,
+ sizeof(acpi_gbl_print_buffer), format, args);
+
+ (void)acpi_os_write_file(file, acpi_gbl_print_buffer, length, 1);
+ acpi_os_release_lock(acpi_gbl_print_lock, flags);
+
+ return (length);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_file_printf
+ *
+ * PARAMETERS: file - File descriptor
+ * Format, ... - Standard printf format
+ *
+ * RETURN: Number of bytes actually written.
+ *
+ * DESCRIPTION: Formatted output to a file.
+ *
+ ******************************************************************************/
+
+int acpi_ut_file_printf(ACPI_FILE file, const char *format, ...)
+{
+ va_list args;
+ int length;
+
+ va_start(args, format);
+ length = acpi_ut_file_vprintf(file, format, args);
+ va_end(args);
+
+ return (length);
+}
+#endif
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
new file mode 100644
index 000000000000..4dc33130f134
--- /dev/null
+++ b/drivers/acpi/acpica/utuuid.c
@@ -0,0 +1,96 @@
+/******************************************************************************
+ *
+ * Module Name: utuuid -- UUID support functions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2014, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+
+#define _COMPONENT ACPI_COMPILER
+ACPI_MODULE_NAME("utuuid")
+
+/*
+ * UUID support functions.
+ *
+ * This table is used to convert an input UUID ascii string to a 16 byte
+ * buffer and the reverse. The table maps a UUID buffer index 0-15 to
+ * the index within the 36-byte UUID string where the associated 2-byte
+ * hex value can be found.
+ *
+ * 36-byte UUID strings are of the form:
+ * aabbccdd-eeff-gghh-iijj-kkllmmnnoopp
+ * Where aa-pp are one byte hex numbers, made up of two hex digits
+ *
+ * Note: This table is basically the inverse of the string-to-offset table
+ * found in the ACPI spec in the description of the to_UUID macro.
+ */
+const u8 acpi_gbl_map_to_uuid_offset[UUID_BUFFER_LENGTH] = {
+ 6, 4, 2, 0, 11, 9, 16, 14, 19, 21, 24, 26, 28, 30, 32, 34
+};
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_convert_string_to_uuid
+ *
+ * PARAMETERS: in_string - 36-byte formatted UUID string
+ * uuid_buffer - Where the 16-byte UUID buffer is returned
+ *
+ * RETURN: None. Output data is returned in the uuid_buffer
+ *
+ * DESCRIPTION: Convert a 36-byte formatted UUID string to 16-byte UUID buffer
+ *
+ ******************************************************************************/
+
+void acpi_ut_convert_string_to_uuid(char *in_string, u8 *uuid_buffer)
+{
+ u32 i;
+
+ for (i = 0; i < UUID_BUFFER_LENGTH; i++) {
+ uuid_buffer[i] =
+ (acpi_ut_ascii_char_to_hex
+ (in_string[acpi_gbl_map_to_uuid_offset[i]]) << 4);
+
+ uuid_buffer[i] |=
+ acpi_ut_ascii_char_to_hex(in_string
+ [acpi_gbl_map_to_uuid_offset[i] +
+ 1]);
+ }
+}
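The offset table reads as: UUID buffer byte i is found at string position acpi_gbl_map_to_uuid_offset[i], two hex characters per byte, with the first three UUID fields (one dword, two words) stored little-endian, which is why the table starts 6, 4, 2, 0. A standalone sketch of the same conversion with standard C types, illustrative only:

#include <stdio.h>
#include <stdint.h>

#define UUID_BUFFER_LENGTH_DEMO 16

/* Same offsets as acpi_gbl_map_to_uuid_offset[] above */
static const uint8_t uuid_offset_demo[UUID_BUFFER_LENGTH_DEMO] = {
        6, 4, 2, 0, 11, 9, 16, 14, 19, 21, 24, 26, 28, 30, 32, 34
};

static uint8_t hex_demo(int c)
{
        return (c <= '9') ? c - '0' :
               (c <= 'F') ? c - 'A' + 10 : c - 'a' + 10;
}

int main(void)
{
        const char *in = "12345678-9abc-def0-1234-56789abcdef0";
        uint8_t uuid[UUID_BUFFER_LENGTH_DEMO];
        int i;

        for (i = 0; i < UUID_BUFFER_LENGTH_DEMO; i++) {
                uuid[i] = hex_demo(in[uuid_offset_demo[i]]) << 4;
                uuid[i] |= hex_demo(in[uuid_offset_demo[i] + 1]);
        }

        for (i = 0; i < UUID_BUFFER_LENGTH_DEMO; i++)
                printf("%02x ", uuid[i]);
        printf("\n");   /* 78 56 34 12 bc 9a f0 de 12 34 56 78 9a bc de f0 */
        return 0;
}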
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index e5bcd919d4e6..16129c78b489 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -121,11 +121,11 @@ struct dentry;
struct dentry *apei_get_debugfs_dir(void);
#define apei_estatus_for_each_section(estatus, section) \
- for (section = (struct acpi_generic_data *)(estatus + 1); \
+ for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
(void *)section - (void *)estatus < estatus->data_length; \
section = (void *)(section+1) + section->error_data_length)
-static inline u32 cper_estatus_len(struct acpi_generic_status *estatus)
+static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
{
if (estatus->raw_data_length)
return estatus->raw_data_offset + \
@@ -135,9 +135,9 @@ static inline u32 cper_estatus_len(struct acpi_generic_status *estatus)
}
void cper_estatus_print(const char *pfx,
- const struct acpi_generic_status *estatus);
-int cper_estatus_check_header(const struct acpi_generic_status *estatus);
-int cper_estatus_check(const struct acpi_generic_status *estatus);
+ const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
int apei_osc_setup(void);
#endif
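A hedged sketch of how the renamed types are walked by a consumer such as ghes_do_proc() below; the field names come from the macro above, and the helper itself is illustrative only, not part of the patch:

static unsigned int count_estatus_sections_demo(struct acpi_hest_generic_status *estatus)
{
        struct acpi_hest_generic_data *gdata;
        unsigned int n = 0;

        /* data_length bounds the walk, error_data_length advances it */
        apei_estatus_for_each_section(estatus, gdata)
                n++;

        return n;
}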
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index e05d84e7b06d..fc5f780bb61d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -74,13 +74,13 @@
#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
- ((struct acpi_generic_status *) \
+ ((struct acpi_hest_generic_status *) \
((struct ghes_estatus_cache *)(estatus_cache) + 1))
#define GHES_ESTATUS_NODE_LEN(estatus_len) \
(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node) \
- ((struct acpi_generic_status *) \
+ ((struct acpi_hest_generic_status *) \
((struct ghes_estatus_node *)(estatus_node) + 1))
bool ghes_disable;
@@ -388,7 +388,7 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
+static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
unsigned long pfn;
@@ -421,10 +421,10 @@ static void ghes_handle_memory_failure(struct acpi_generic_data *gdata, int sev)
}
static void ghes_do_proc(struct ghes *ghes,
- const struct acpi_generic_status *estatus)
+ const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
- struct acpi_generic_data *gdata;
+ struct acpi_hest_generic_data *gdata;
sev = ghes_severity(estatus->error_severity);
apei_estatus_for_each_section(estatus, gdata) {
@@ -476,7 +476,7 @@ static void ghes_do_proc(struct ghes *ghes,
static void __ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
- const struct acpi_generic_status *estatus)
+ const struct acpi_hest_generic_status *estatus)
{
static atomic_t seqno;
unsigned int curr_seqno;
@@ -498,7 +498,7 @@ static void __ghes_print_estatus(const char *pfx,
static int ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
- const struct acpi_generic_status *estatus)
+ const struct acpi_hest_generic_status *estatus)
{
/* Not more than 2 messages every 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
@@ -520,13 +520,13 @@ static int ghes_print_estatus(const char *pfx,
* GHES error status reporting throttle, to report more kinds of
* errors, instead of just most frequently occurred errors.
*/
-static int ghes_estatus_cached(struct acpi_generic_status *estatus)
+static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
{
u32 len;
int i, cached = 0;
unsigned long long now;
struct ghes_estatus_cache *cache;
- struct acpi_generic_status *cache_estatus;
+ struct acpi_hest_generic_status *cache_estatus;
len = cper_estatus_len(estatus);
rcu_read_lock();
@@ -551,12 +551,12 @@ static int ghes_estatus_cached(struct acpi_generic_status *estatus)
static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
struct acpi_hest_generic *generic,
- struct acpi_generic_status *estatus)
+ struct acpi_hest_generic_status *estatus)
{
int alloced;
u32 len, cache_len;
struct ghes_estatus_cache *cache;
- struct acpi_generic_status *cache_estatus;
+ struct acpi_hest_generic_status *cache_estatus;
alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
@@ -599,7 +599,7 @@ static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
static void ghes_estatus_cache_add(
struct acpi_hest_generic *generic,
- struct acpi_generic_status *estatus)
+ struct acpi_hest_generic_status *estatus)
{
int i, slot = -1, count;
unsigned long long now, duration, period, max_period = 0;
@@ -757,7 +757,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
struct llist_node *llnode, *next;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
@@ -790,7 +790,7 @@ static void ghes_print_queued_estatus(void)
struct llist_node *llnode;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
u32 len, node_len;
llnode = llist_del_all(&ghes_estatus_llist);
@@ -849,7 +849,7 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
u32 len, node_len;
struct ghes_estatus_node *estatus_node;
- struct acpi_generic_status *estatus;
+ struct acpi_hest_generic_status *estatus;
#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
@@ -991,7 +991,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
rc = -EIO;
if (generic->error_block_length <
- sizeof(struct acpi_generic_status)) {
+ sizeof(struct acpi_hest_generic_status)) {
pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
generic->error_block_length,
generic->header.source_id);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 130f513e08c9..48bcf38a0ea8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -35,7 +35,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/suspend.h>
-#include <linux/delay.h>
#include <asm/unaligned.h>
#ifdef CONFIG_ACPI_PROCFS_POWER
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 3d8413d02a97..36eb42e3b0bb 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -247,75 +247,11 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
/*
- * The following machines have broken backlight support when reporting
- * the Windows 2012 OSI, so disable it until their support is fixed.
+ * These machines will power on immediately after shutdown when
+ * reporting the Windows 2012 OSI.
*/
{
.callback = dmi_disable_osi_win8,
- .ident = "ASUS Zenbook Prime UX31A",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad Edge E530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad Edge E530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad Edge E530",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Acer Aspire V5-573G",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "Acer Aspire V5-572G",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad T431s",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "ThinkPad T430",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
.ident = "Dell Inspiron 7737",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c5bc8cfe09fa..8581f5b84f48 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -477,9 +477,6 @@ static int __init acpi_bus_init_irq(void)
return 0;
}
-u8 acpi_gbl_permanent_mmap;
-
-
void __init acpi_early_init(void)
{
acpi_status status;
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index db35594d4df7..6d5d1832a588 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -79,11 +79,13 @@ static int acpi_button_remove(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
#ifdef CONFIG_PM_SLEEP
+static int acpi_button_suspend(struct device *dev);
static int acpi_button_resume(struct device *dev);
#else
+#define acpi_button_suspend NULL
#define acpi_button_resume NULL
#endif
-static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
+static SIMPLE_DEV_PM_OPS(acpi_button_pm, acpi_button_suspend, acpi_button_resume);
static struct acpi_driver acpi_button_driver = {
.name = "button",
@@ -102,6 +104,7 @@ struct acpi_button {
struct input_dev *input;
char phys[32]; /* for input device */
unsigned long pushed;
+ bool suspended;
};
static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier);
@@ -293,15 +296,19 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
if (button->type == ACPI_BUTTON_TYPE_LID) {
acpi_lid_send_state(device);
} else {
- int keycode = test_bit(KEY_SLEEP, input->keybit) ?
- KEY_SLEEP : KEY_POWER;
+ int keycode;
+
+ pm_wakeup_event(&device->dev, 0);
+ if (button->suspended)
+ break;
+ keycode = test_bit(KEY_SLEEP, input->keybit) ?
+ KEY_SLEEP : KEY_POWER;
input_report_key(input, keycode, 1);
input_sync(input);
input_report_key(input, keycode, 0);
input_sync(input);
- pm_wakeup_event(&device->dev, 0);
acpi_bus_generate_netlink_event(
device->pnp.device_class,
dev_name(&device->dev),
@@ -316,11 +323,21 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
}
#ifdef CONFIG_PM_SLEEP
+static int acpi_button_suspend(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct acpi_button *button = acpi_driver_data(device);
+
+ button->suspended = true;
+ return 0;
+}
+
static int acpi_button_resume(struct device *dev)
{
struct acpi_device *device = to_acpi_device(dev);
struct acpi_button *button = acpi_driver_data(device);
+ button->suspended = false;
if (button->type == ACPI_BUTTON_TYPE_LID)
return acpi_lid_send_state(device);
return 0;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 49a51277f81d..67075f800e34 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -367,29 +367,61 @@ EXPORT_SYMBOL(acpi_bus_power_manageable);
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
+static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
+{
+ struct acpi_device *adev;
+
+ if (val != ACPI_NOTIFY_DEVICE_WAKE)
+ return;
+
+ adev = acpi_bus_get_acpi_device(handle);
+ if (!adev)
+ return;
+
+ mutex_lock(&acpi_pm_notifier_lock);
+
+ if (adev->wakeup.flags.notifier_present) {
+ __pm_wakeup_event(adev->wakeup.ws, 0);
+ if (adev->wakeup.context.work.func)
+ queue_pm_work(&adev->wakeup.context.work);
+ }
+
+ mutex_unlock(&acpi_pm_notifier_lock);
+
+ acpi_bus_put_acpi_device(adev);
+}
+
/**
- * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
- * @adev: ACPI device to add the notifier for.
- * @context: Context information to pass to the notifier routine.
+ * acpi_add_pm_notifier - Register PM notify handler for given ACPI device.
+ * @adev: ACPI device to add the notify handler for.
+ * @dev: Device to generate a wakeup event for while handling the notification.
+ * @work_func: Work function to execute when handling the notification.
*
* NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
* PM wakeup events. For example, wakeup events may be generated for bridges
* if one of the devices below the bridge is signaling wakeup, even if the
* bridge itself doesn't have a wakeup GPE associated with it.
*/
-acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler, void *context)
+acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
+ void (*work_func)(struct work_struct *work))
{
acpi_status status = AE_ALREADY_EXISTS;
+ if (!dev && !work_func)
+ return AE_BAD_PARAMETER;
+
mutex_lock(&acpi_pm_notifier_lock);
if (adev->wakeup.flags.notifier_present)
goto out;
- status = acpi_install_notify_handler(adev->handle,
- ACPI_SYSTEM_NOTIFY,
- handler, context);
+ adev->wakeup.ws = wakeup_source_register(dev_name(&adev->dev));
+ adev->wakeup.context.dev = dev;
+ if (work_func)
+ INIT_WORK(&adev->wakeup.context.work, work_func);
+
+ status = acpi_install_notify_handler(adev->handle, ACPI_SYSTEM_NOTIFY,
+ acpi_pm_notify_handler, NULL);
if (ACPI_FAILURE(status))
goto out;
@@ -404,8 +436,7 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
* acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
* @adev: ACPI device to remove the notifier from.
*/
-acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
- acpi_notify_handler handler)
+acpi_status acpi_remove_pm_notifier(struct acpi_device *adev)
{
acpi_status status = AE_BAD_PARAMETER;
@@ -416,10 +447,17 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
status = acpi_remove_notify_handler(adev->handle,
ACPI_SYSTEM_NOTIFY,
- handler);
+ acpi_pm_notify_handler);
if (ACPI_FAILURE(status))
goto out;
+ if (adev->wakeup.context.work.func) {
+ cancel_work_sync(&adev->wakeup.context.work);
+ adev->wakeup.context.work.func = NULL;
+ }
+ adev->wakeup.context.dev = NULL;
+ wakeup_source_unregister(adev->wakeup.ws);
+
adev->wakeup.flags.notifier_present = false;
out:
@@ -558,7 +596,6 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
*/
int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
{
- acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_device *adev;
int ret, d_min, d_max;
@@ -573,8 +610,9 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
d_max_in = ACPI_STATE_D3_HOT;
}
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
@@ -600,26 +638,25 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
}
EXPORT_SYMBOL(acpi_pm_device_sleep_state);
-#ifdef CONFIG_PM_RUNTIME
/**
- * acpi_wakeup_device - Wakeup notification handler for ACPI devices.
- * @handle: ACPI handle of the device the notification is for.
- * @event: Type of the signaled event.
- * @context: Device corresponding to @handle.
+ * acpi_pm_notify_work_func - ACPI devices wakeup notification work function.
+ * @work: Work item to handle.
*/
-static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context)
+static void acpi_pm_notify_work_func(struct work_struct *work)
{
- struct device *dev = context;
+ struct device *dev;
- if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) {
+ dev = container_of(work, struct acpi_device_wakeup_context, work)->dev;
+ if (dev) {
pm_wakeup_event(dev, 0);
pm_runtime_resume(dev);
}
}
/**
- * __acpi_device_run_wake - Enable/disable runtime remote wakeup for device.
- * @adev: ACPI device to enable/disable the remote wakeup for.
+ * acpi_device_wakeup - Enable/disable wakeup functionality for device.
+ * @adev: ACPI device to enable/disable wakeup functionality for.
+ * @target_state: State the system is transitioning into.
* @enable: Whether to enable or disable the wakeup functionality.
*
* Enable/disable the GPE associated with @adev so that it can generate
@@ -629,7 +666,8 @@ static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context)
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
-int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
+static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
+ bool enable)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
@@ -637,7 +675,7 @@ int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
acpi_status res;
int error;
- error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0);
+ error = acpi_enable_wakeup_device_power(adev, target_state);
if (error)
return error;
@@ -653,6 +691,7 @@ int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
return 0;
}
+#ifdef CONFIG_PM_RUNTIME
/**
* acpi_pm_device_run_wake - Enable/disable remote wakeup for given device.
* @dev: Device to enable/disable the platform to wake up.
@@ -661,63 +700,42 @@ int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
{
struct acpi_device *adev;
- acpi_handle handle;
if (!device_run_wake(phys_dev))
return -EINVAL;
- handle = ACPI_HANDLE(phys_dev);
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
- __func__);
+ adev = ACPI_COMPANION(phys_dev);
+ if (!adev) {
+ dev_dbg(phys_dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
- return __acpi_device_run_wake(adev, enable);
+ return acpi_device_wakeup(adev, ACPI_STATE_S0, enable);
}
EXPORT_SYMBOL(acpi_pm_device_run_wake);
-#else
-static inline void acpi_wakeup_device(acpi_handle handle, u32 event,
- void *context) {}
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
/**
- * __acpi_device_sleep_wake - Enable or disable device to wake up the system.
- * @dev: Device to enable/desible to wake up the system.
- * @target_state: System state the device is supposed to wake up from.
- * @enable: Whether to enable or disable @dev to wake up the system.
- */
-int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state,
- bool enable)
-{
- return enable ?
- acpi_enable_wakeup_device_power(adev, target_state) :
- acpi_disable_wakeup_device_power(adev);
-}
-
-/**
* acpi_pm_device_sleep_wake - Enable or disable device to wake up the system.
 * @dev: Device to enable/disable to wake up the system from sleep states.
* @enable: Whether to enable or disable @dev to wake up the system.
*/
int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
{
- acpi_handle handle;
struct acpi_device *adev;
int error;
if (!device_can_wakeup(dev))
return -EINVAL;
- handle = ACPI_HANDLE(dev);
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
+ adev = ACPI_COMPANION(dev);
+ if (!adev) {
+ dev_dbg(dev, "ACPI companion missing in %s!\n", __func__);
return -ENODEV;
}
- error = __acpi_device_sleep_wake(adev, acpi_target_system_state(),
- enable);
+ error = acpi_device_wakeup(adev, acpi_target_system_state(), enable);
if (!error)
dev_info(dev, "System wakeup %s by ACPI\n",
enable ? "enabled" : "disabled");
@@ -775,13 +793,13 @@ int acpi_dev_runtime_suspend(struct device *dev)
remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
PM_QOS_FLAGS_NONE;
- error = __acpi_device_run_wake(adev, remote_wakeup);
+ error = acpi_device_wakeup(adev, ACPI_STATE_S0, remote_wakeup);
if (remote_wakeup && error)
return -EAGAIN;
error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
if (error)
- __acpi_device_run_wake(adev, false);
+ acpi_device_wakeup(adev, ACPI_STATE_S0, false);
return error;
}
@@ -804,7 +822,7 @@ int acpi_dev_runtime_resume(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- __acpi_device_run_wake(adev, false);
+ acpi_device_wakeup(adev, ACPI_STATE_S0, false);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
@@ -860,13 +878,13 @@ int acpi_dev_suspend_late(struct device *dev)
target_state = acpi_target_system_state();
wakeup = device_may_wakeup(dev);
- error = __acpi_device_sleep_wake(adev, target_state, wakeup);
+ error = acpi_device_wakeup(adev, target_state, wakeup);
if (wakeup && error)
return error;
error = acpi_dev_pm_low_power(dev, adev, target_state);
if (error)
- __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+ acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
return error;
}
@@ -889,7 +907,7 @@ int acpi_dev_resume_early(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- __acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
+ acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
@@ -1048,11 +1066,11 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
if (dev->pm_domain)
return -EEXIST;
- acpi_add_pm_notifier(adev, acpi_wakeup_device, dev);
+ acpi_add_pm_notifier(adev, dev, acpi_pm_notify_work_func);
dev->pm_domain = &acpi_general_pm_domain;
if (power_on) {
acpi_dev_pm_full_power(adev);
- __acpi_device_run_wake(adev, false);
+ acpi_device_wakeup(adev, ACPI_STATE_S0, false);
}
return 0;
}
@@ -1076,7 +1094,7 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
if (adev && dev->pm_domain == &acpi_general_pm_domain) {
dev->pm_domain = NULL;
- acpi_remove_pm_notifier(adev, acpi_wakeup_device);
+ acpi_remove_pm_notifier(adev);
if (power_off) {
/*
* If the device's PM QoS resume latency limit or flags
@@ -1086,7 +1104,7 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
- __acpi_device_run_wake(adev, false);
+ acpi_device_wakeup(adev, ACPI_STATE_S0, false);
acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
}
}
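A hedged sketch of the reworked notifier API, modeled on acpi_dev_pm_attach() and acpi_dev_pm_detach() above: the caller now passes the physical device and a work function instead of an acpi_notify_handler, and the ACPI core manages the wakeup source and work item. The _demo functions are illustrative only; error handling is trimmed:

static void my_wakeup_work_demo(struct work_struct *work)
{
        struct device *dev =
                container_of(work, struct acpi_device_wakeup_context, work)->dev;

        if (dev)
                pm_runtime_resume(dev); /* runs in process context */
}

static int my_pm_attach_demo(struct device *dev, struct acpi_device *adev)
{
        acpi_status status = acpi_add_pm_notifier(adev, dev, my_wakeup_work_demo);

        return ACPI_FAILURE(status) ? -ENODEV : 0;
}

static void my_pm_detach_demo(struct acpi_device *adev)
{
        /* Cancels the queued work and drops the wakeup source */
        acpi_remove_pm_notifier(adev);
}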
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 7de5b603f272..4c5cf77e7576 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -84,8 +84,6 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type, unsigned long long sta);
void acpi_device_add_finalize(struct acpi_device *device);
void acpi_free_pnp_ids(struct acpi_device_pnp *pnp);
-int acpi_bind_one(struct device *dev, struct acpi_device *adev);
-int acpi_unbind_one(struct device *dev);
bool acpi_device_is_present(struct acpi_device *adev);
bool acpi_device_is_battery(struct acpi_device *adev);
@@ -108,7 +106,12 @@ int acpi_power_transition(struct acpi_device *device, int state);
int acpi_device_update_power(struct acpi_device *device, int *state_p);
int acpi_wakeup_device_init(void);
+
+#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
void acpi_early_processor_set_pdc(void);
+#else
+static inline void acpi_early_processor_set_pdc(void) {}
+#endif
/* --------------------------------------------------------------------------
Embedded Controller
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index bad25b070fe0..3abe9b223ba7 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -259,12 +259,14 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
"System description tables not found\n");
return 0;
}
- } else {
+ } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
acpi_physical_address pa = 0;
acpi_find_root_pointer(&pa);
return pa;
}
+
+ return 0;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 9c62340c2360..c96887d5289e 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -481,6 +481,10 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
if (!pin)
return;
+ /* Keep IOAPIC pin configuration when suspending */
+ if (dev->dev.power.is_prepared)
+ return;
+
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
return;
@@ -498,5 +502,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
*/
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
- acpi_unregister_gsi(gsi);
+ if (gsi >= 0 && dev->irq > 0)
+ acpi_unregister_gsi(gsi);
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index d388f13d48b4..e6ae603ed1a1 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -593,7 +593,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (no_aspm)
pcie_no_aspm();
- pci_acpi_add_bus_pm_notifier(device, root->bus);
+ pci_acpi_add_bus_pm_notifier(device);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 71e2065639a6..e32321ce9a5c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -4,17 +4,11 @@
*
* Alex Chiang <achiang@hp.com>
* - Unified x86/ia64 implementations
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * - Added _PDC for platforms with Intel CPUs
*/
#include <linux/export.h>
-#include <linux/dmi.h>
-#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
-#include "internal.h"
-
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");
@@ -135,6 +129,8 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
map_lapic_id(header, acpi_id, &apic_id);
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
map_lsapic_id(header, type, acpi_id, &apic_id);
+ } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
+ map_x2apic_id(header, type, acpi_id, &apic_id);
}
exit:
@@ -208,195 +204,3 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
return acpi_map_cpuid(apic_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-
-static bool __init processor_physically_present(acpi_handle handle)
-{
- int cpuid, type;
- u32 acpi_id;
- acpi_status status;
- acpi_object_type acpi_type;
- unsigned long long tmp;
- union acpi_object object = { 0 };
- struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
- status = acpi_get_type(handle, &acpi_type);
- if (ACPI_FAILURE(status))
- return false;
-
- switch (acpi_type) {
- case ACPI_TYPE_PROCESSOR:
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = object.processor.proc_id;
- break;
- case ACPI_TYPE_DEVICE:
- status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = tmp;
- break;
- default:
- return false;
- }
-
- type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
- cpuid = acpi_get_cpuid(handle, type, acpi_id);
-
- if (cpuid == -1)
- return false;
-
- return true;
-}
-
-static void acpi_set_pdc_bits(u32 *buf)
-{
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
-
- /* Enable coordination with firmware's _TSD info */
- buf[2] = ACPI_PDC_SMP_T_SWCOORD;
-
- /* Twiddle arch-specific bits needed for _PDC */
- arch_acpi_set_pdc_bits(buf);
-}
-
-static struct acpi_object_list *acpi_processor_alloc_pdc(void)
-{
- struct acpi_object_list *obj_list;
- union acpi_object *obj;
- u32 *buf;
-
- /* allocate and initialize pdc. It will be used later. */
- obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
- if (!obj_list) {
- printk(KERN_ERR "Memory allocation error\n");
- return NULL;
- }
-
- obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
- if (!obj) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj_list);
- return NULL;
- }
-
- buf = kmalloc(12, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj);
- kfree(obj_list);
- return NULL;
- }
-
- acpi_set_pdc_bits(buf);
-
- obj->type = ACPI_TYPE_BUFFER;
- obj->buffer.length = 12;
- obj->buffer.pointer = (u8 *) buf;
- obj_list->count = 1;
- obj_list->pointer = obj;
-
- return obj_list;
-}
-
-/*
- * _PDC is required for a BIOS-OS handshake for most of the newer
- * ACPI processor features.
- */
-static acpi_status
-acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
-{
- acpi_status status = AE_OK;
-
- if (boot_option_idle_override == IDLE_NOMWAIT) {
- /*
- * If mwait is disabled for CPU C-states, the C2C3_FFH access
- * mode will be disabled in the parameter of _PDC object.
- * Of course C1_FFH access mode will also be disabled.
- */
- union acpi_object *obj;
- u32 *buffer = NULL;
-
- obj = pdc_in->pointer;
- buffer = (u32 *)(obj->buffer.pointer);
- buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
-
- }
- status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
-
- if (ACPI_FAILURE(status))
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Could not evaluate _PDC, using legacy perf. control.\n"));
-
- return status;
-}
-
-void acpi_processor_set_pdc(acpi_handle handle)
-{
- struct acpi_object_list *obj_list;
-
- if (arch_has_acpi_pdc() == false)
- return;
-
- obj_list = acpi_processor_alloc_pdc();
- if (!obj_list)
- return;
-
- acpi_processor_eval_pdc(handle, obj_list);
-
- kfree(obj_list->pointer->buffer.pointer);
- kfree(obj_list->pointer);
- kfree(obj_list);
-}
-
-static acpi_status __init
-early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- if (processor_physically_present(handle) == false)
- return AE_OK;
-
- acpi_processor_set_pdc(handle);
- return AE_OK;
-}
-
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
-static int __init set_no_mwait(const struct dmi_system_id *id)
-{
- pr_notice(PREFIX "%s detected - disabling mwait for CPU C-states\n",
- id->ident);
- boot_option_idle_override = IDLE_NOMWAIT;
- return 0;
-}
-
-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
- {
- set_no_mwait, "Extensa 5220", {
- DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
- DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
- {},
-};
-
-static void __init processor_dmi_check(void)
-{
- /*
- * Check whether the system is DMI table. If yes, OSPM
- * should not use mwait for CPU-states.
- */
- dmi_check_system(processor_idle_dmi_table);
-}
-#else
-static inline void processor_dmi_check(void) {}
-#endif
-
-void __init acpi_early_processor_set_pdc(void)
-{
- processor_dmi_check();
-
- acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- early_init_pdc, NULL, NULL, NULL);
- acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
-}
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
new file mode 100644
index 000000000000..e5dd80800930
--- /dev/null
+++ b/drivers/acpi/processor_pdc.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2005 Intel Corporation
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ * - Added _PDC for platforms with Intel CPUs
+ */
+
+#define pr_fmt(fmt) "ACPI: " fmt
+
+#include <linux/dmi.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+
+#include "internal.h"
+
+#define _COMPONENT ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_pdc");
+
+static bool __init processor_physically_present(acpi_handle handle)
+{
+ int cpuid, type;
+ u32 acpi_id;
+ acpi_status status;
+ acpi_object_type acpi_type;
+ unsigned long long tmp;
+ union acpi_object object = { 0 };
+ struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
+
+ status = acpi_get_type(handle, &acpi_type);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ switch (acpi_type) {
+ case ACPI_TYPE_PROCESSOR:
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return false;
+ acpi_id = object.processor.proc_id;
+ break;
+ case ACPI_TYPE_DEVICE:
+ status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return false;
+ acpi_id = tmp;
+ break;
+ default:
+ return false;
+ }
+
+ type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
+ cpuid = acpi_get_cpuid(handle, type, acpi_id);
+
+ if (cpuid == -1)
+ return false;
+
+ return true;
+}
+
+static void acpi_set_pdc_bits(u32 *buf)
+{
+ buf[0] = ACPI_PDC_REVISION_ID;
+ buf[1] = 1;
+
+ /* Enable coordination with firmware's _TSD info */
+ buf[2] = ACPI_PDC_SMP_T_SWCOORD;
+
+ /* Twiddle arch-specific bits needed for _PDC */
+ arch_acpi_set_pdc_bits(buf);
+}
+
+static struct acpi_object_list *acpi_processor_alloc_pdc(void)
+{
+ struct acpi_object_list *obj_list;
+ union acpi_object *obj;
+ u32 *buf;
+
+ /* allocate and initialize pdc. It will be used later. */
+ obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+ if (!obj_list)
+ goto out;
+
+ obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+ if (!obj) {
+ kfree(obj_list);
+ goto out;
+ }
+
+ buf = kmalloc(12, GFP_KERNEL);
+ if (!buf) {
+ kfree(obj);
+ kfree(obj_list);
+ goto out;
+ }
+
+ acpi_set_pdc_bits(buf);
+
+ obj->type = ACPI_TYPE_BUFFER;
+ obj->buffer.length = 12;
+ obj->buffer.pointer = (u8 *) buf;
+ obj_list->count = 1;
+ obj_list->pointer = obj;
+
+ return obj_list;
+out:
+ pr_err("Memory allocation error\n");
+ return NULL;
+}
+
+/*
+ * _PDC is required for a BIOS-OS handshake for most of the newer
+ * ACPI processor features.
+ */
+static acpi_status
+acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
+{
+ acpi_status status = AE_OK;
+
+ if (boot_option_idle_override == IDLE_NOMWAIT) {
+ /*
+ * If mwait is disabled for CPU C-states, the C2C3_FFH access
+ * mode will be disabled in the parameter of _PDC object.
+ * Of course C1_FFH access mode will also be disabled.
+ */
+ union acpi_object *obj;
+ u32 *buffer = NULL;
+
+ obj = pdc_in->pointer;
+ buffer = (u32 *)(obj->buffer.pointer);
+ buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
+
+ }
+ status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
+
+ if (ACPI_FAILURE(status))
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Could not evaluate _PDC, using legacy perf. control.\n"));
+
+ return status;
+}
+
+void acpi_processor_set_pdc(acpi_handle handle)
+{
+ struct acpi_object_list *obj_list;
+
+ if (arch_has_acpi_pdc() == false)
+ return;
+
+ obj_list = acpi_processor_alloc_pdc();
+ if (!obj_list)
+ return;
+
+ acpi_processor_eval_pdc(handle, obj_list);
+
+ kfree(obj_list->pointer->buffer.pointer);
+ kfree(obj_list->pointer);
+ kfree(obj_list);
+}
+
+static acpi_status __init
+early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+ if (processor_physically_present(handle) == false)
+ return AE_OK;
+
+ acpi_processor_set_pdc(handle);
+ return AE_OK;
+}
+
+static int __init set_no_mwait(const struct dmi_system_id *id)
+{
+ pr_notice("%s detected - disabling mwait for CPU C-states\n",
+ id->ident);
+ boot_option_idle_override = IDLE_NOMWAIT;
+ return 0;
+}
+
+static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
+ {
+ set_no_mwait, "Extensa 5220", {
+ DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+ DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+ {},
+};
+
+static void __init processor_dmi_check(void)
+{
+ /*
+ * Check whether the system is in the DMI table. If yes, OSPM
+ * should not use mwait for CPU C-states.
+ */
+ dmi_check_system(processor_idle_dmi_table);
+}
+
+void __init acpi_early_processor_set_pdc(void)
+{
+ processor_dmi_check();
+
+ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX,
+ early_init_pdc, NULL, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, early_init_pdc, NULL, NULL);
+}
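
The _PDC argument assembled above is a single ACPI buffer of three DWORDs: the revision, a count of capability values, and the capability bits themselves (hence buffer.length = 12). Purely as an illustrative sketch, the same layout expressed as a struct; the struct and field names are hypothetical, not part of the patch:

#include <linux/types.h>

struct pdc_capabilities {
	u32 revision;	/* buf[0]: ACPI_PDC_REVISION_ID */
	u32 count;	/* buf[1]: number of capability DWORDs that follow (1 here) */
	u32 caps;	/* buf[2]: ACPI_PDC_* bits, plus arch-specific additions */
};
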
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index f775fa0d850f..5d592e17d760 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -77,7 +77,9 @@ void acpi_initialize_hp_context(struct acpi_device *adev,
void (*uevent)(struct acpi_device *, u32))
{
acpi_lock_hp_context();
- acpi_set_hp_context(adev, hp, notify, uevent, NULL);
+ hp->notify = notify;
+ hp->uevent = uevent;
+ acpi_set_hp_context(adev, hp);
acpi_unlock_hp_context();
}
EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
@@ -1421,14 +1423,13 @@ static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
wakeup->sleep_state = sleep_state;
}
}
- acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number);
out:
kfree(buffer.pointer);
return err;
}
-static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
+static void acpi_wakeup_gpe_init(struct acpi_device *device)
{
struct acpi_device_id button_device_ids[] = {
{"PNP0C0C", 0},
@@ -1436,29 +1437,33 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
{"PNP0C0E", 0},
{"", 0},
};
+ struct acpi_device_wakeup *wakeup = &device->wakeup;
acpi_status status;
acpi_event_status event_status;
- device->wakeup.flags.notifier_present = 0;
+ wakeup->flags.notifier_present = 0;
/* Power button, Lid switch always enable wakeup */
if (!acpi_match_device_ids(device, button_device_ids)) {
- device->wakeup.flags.run_wake = 1;
+ wakeup->flags.run_wake = 1;
if (!acpi_match_device_ids(device, &button_device_ids[1])) {
/* Do not use Lid/sleep button for S5 wakeup */
- if (device->wakeup.sleep_state == ACPI_STATE_S5)
- device->wakeup.sleep_state = ACPI_STATE_S4;
+ if (wakeup->sleep_state == ACPI_STATE_S5)
+ wakeup->sleep_state = ACPI_STATE_S4;
}
+ acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
device_set_wakeup_capable(&device->dev, true);
return;
}
- status = acpi_get_gpe_status(device->wakeup.gpe_device,
- device->wakeup.gpe_number,
- &event_status);
- if (status == AE_OK)
- device->wakeup.flags.run_wake =
- !!(event_status & ACPI_EVENT_FLAG_HANDLE);
+ acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
+ wakeup->gpe_number);
+ status = acpi_get_gpe_status(wakeup->gpe_device, wakeup->gpe_number,
+ &event_status);
+ if (ACPI_FAILURE(status))
+ return;
+
+ wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HANDLE);
}
static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
@@ -1478,7 +1483,7 @@ static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
device->wakeup.flags.valid = 1;
device->wakeup.prepare_count = 0;
- acpi_bus_set_run_wake_flags(device);
+ acpi_wakeup_gpe_init(device);
/* Call _PSW/_DSW object to disable its ability to wake the sleeping
* system for the ACPI device with the _PRW object.
* The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
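
The wakeup rework above distinguishes two setup paths: button-like devices (power button, lid, sleep button) only mark their GPE for wake with acpi_mark_gpe_for_wake(), while other wakeup devices go through acpi_setup_gpe_for_wake() followed by a GPE status query. A minimal sketch of the two calls, assuming only the ACPICA prototypes used in the hunk; the helper name and is_button flag are hypothetical:

#include <linux/acpi.h>

static acpi_status example_wake_setup(struct acpi_device *adev, bool is_button)
{
	struct acpi_device_wakeup *wakeup = &adev->wakeup;

	if (is_button)
		/* no implicit notify handler; just flag the GPE as wake-capable */
		return acpi_mark_gpe_for_wake(wakeup->gpe_device,
					      wakeup->gpe_number);

	/* install an implicit notify on the GPE for this wake device */
	return acpi_setup_gpe_for_wake(adev->handle, wakeup->gpe_device,
				       wakeup->gpe_number);
}
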
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index b3e3cc73ba79..54da4a3fe65e 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -322,6 +322,11 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
static void acpi_sleep_dmi_check(void)
{
+ int year;
+
+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012)
+ acpi_nvs_nosave_s3();
+
dmi_check_system(acpisleep_dmi_table);
}
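
acpi_sleep_dmi_check() now also consults the BIOS date: dmi_get_date() returns true only when the requested field could be parsed, so machines without a usable DMI date are left alone. A minimal sketch of that check in isolation; the function name is a placeholder and the 2012 cutoff is simply the value used in the hunk:

#include <linux/dmi.h>

static bool demo_bios_is_2012_or_later(void)
{
	int year = 0;

	/* month and day are not needed, so NULL is passed for them */
	if (!dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL))
		return false;	/* no parsable BIOS date in DMI */

	return year >= 2012;
}
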
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 350d52a8f781..826884392e6b 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -204,6 +204,8 @@ struct acpi_video_device {
struct acpi_video_device_flags flags;
struct acpi_video_device_cap cap;
struct list_head entry;
+ struct delayed_work switch_brightness_work;
+ int switch_brightness_event;
struct acpi_video_bus *video;
struct acpi_device *dev;
struct acpi_video_device_brightness *brightness;
@@ -230,8 +232,7 @@ static int acpi_video_device_lcd_get_level_current(
unsigned long long *level, bool raw);
static int acpi_video_get_next_level(struct acpi_video_device *device,
u32 level_current, u32 event);
-static int acpi_video_switch_brightness(struct acpi_video_device *device,
- int event);
+static void acpi_video_switch_brightness(struct work_struct *work);
static bool acpi_video_use_native_backlight(void)
{
@@ -275,6 +276,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
int request_level = bd->props.brightness + 2;
struct acpi_video_device *vd = bl_get_data(bd);
+ cancel_delayed_work(&vd->switch_brightness_work);
return acpi_video_device_lcd_set_level(vd,
vd->brightness->levels[request_level]);
}
@@ -461,6 +463,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X230",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
.ident = "ThinkPad T430 and T430s",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -469,10 +479,42 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_set_use_native_backlight,
- .ident = "ThinkPad X230",
+ .ident = "ThinkPad T430",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad T431s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
},
},
{
@@ -572,6 +614,30 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
},
{
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire V5-572G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire V5-573G",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ASUS Zenbook Prime UX31A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
+ },
+ },
+ {
.callback = video_set_use_native_backlight,
.ident = "HP ProBook 4340s",
.matches = {
@@ -607,6 +673,15 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
},
{
.callback = video_set_use_native_backlight,
+ .ident = "HP EliteBook 2014 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G2"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
.ident = "HP ZBook 14",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
@@ -1188,6 +1263,8 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
data->device_id = device_id;
data->video = video;
data->dev = device;
+ INIT_DELAYED_WORK(&data->switch_brightness_work,
+ acpi_video_switch_brightness);
attribute = acpi_video_get_device_attr(video, device_id);
@@ -1410,15 +1487,18 @@ acpi_video_get_next_level(struct acpi_video_device *device,
}
}
-static int
-acpi_video_switch_brightness(struct acpi_video_device *device, int event)
+static void
+acpi_video_switch_brightness(struct work_struct *work)
{
+ struct acpi_video_device *device = container_of(to_delayed_work(work),
+ struct acpi_video_device, switch_brightness_work);
unsigned long long level_current, level_next;
+ int event = device->switch_brightness_event;
int result = -EINVAL;
/* no warning message if acpi_backlight=vendor or a quirk is used */
if (!acpi_video_verify_backlight_support())
- return 0;
+ return;
if (!device->brightness)
goto out;
@@ -1440,8 +1520,6 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
out:
if (result)
printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
-
- return result;
}
int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
@@ -1609,6 +1687,16 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
return;
}
+static void brightness_switch_event(struct acpi_video_device *video_device,
+ u32 event)
+{
+ if (!brightness_switch_enabled)
+ return;
+
+ video_device->switch_brightness_event = event;
+ schedule_delayed_work(&video_device->switch_brightness_work, HZ / 10);
+}
+
static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
{
struct acpi_video_device *video_device = data;
@@ -1626,28 +1714,23 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
switch (event) {
case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESS_CYCLE;
break;
case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS: /* Increase brightness */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESSUP;
break;
case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS: /* Decrease brightness */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESSDOWN;
break;
case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightness */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_BRIGHTNESS_ZERO;
break;
case ACPI_VIDEO_NOTIFY_DISPLAY_OFF: /* display device off */
- if (brightness_switch_enabled)
- acpi_video_switch_brightness(video_device, event);
+ brightness_switch_event(video_device, event);
keycode = KEY_DISPLAY_OFF;
break;
default:
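
The video.c portion converts brightness hotkey handling into deferred work: the notify handler only records the event and schedules a delayed work item (HZ/10 above), and a direct write through the backlight interface cancels any pending switch. A generic sketch of that pattern with hypothetical names, relying only on the standard workqueue API:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_backlight {
	struct delayed_work switch_work;
	int pending_event;
};

static void demo_switch_workfn(struct work_struct *work)
{
	struct demo_backlight *db = container_of(to_delayed_work(work),
					struct demo_backlight, switch_work);

	/* perform the (possibly slow) brightness change for db->pending_event
	 * outside the ACPI notify handler */
}

static void demo_init(struct demo_backlight *db)
{
	INIT_DELAYED_WORK(&db->switch_work, demo_switch_workfn);
}

static void demo_hotkey_event(struct demo_backlight *db, int event)
{
	db->pending_event = event;
	schedule_delayed_work(&db->switch_work, HZ / 10);
}

static void demo_user_set_brightness(struct demo_backlight *db)
{
	/* a direct user request supersedes any queued hotkey switch */
	cancel_delayed_work(&db->switch_work);
}
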
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index 558a239954e8..d8961ef4d2e7 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -25,7 +25,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/tegra-ahb.h>
+
+#include <soc/tegra/ahb.h>
#define DRV_NAME "tegra-ahb"
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e65d400efd44..e1b92788c225 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -16,6 +16,7 @@ menuconfig ATA
depends on BLOCK
depends on !(M32R || M68K || S390) || BROKEN
select SCSI
+ select GLOB
---help---
If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
any other ATA device under Linux, say Y and make sure that you know
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 677c0c1b03bd..dbdc5d32343f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -59,6 +59,7 @@
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -4250,73 +4251,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ }
};
-/**
- * glob_match - match a text string against a glob-style pattern
- * @text: the string to be examined
- * @pattern: the glob-style pattern to be matched against
- *
- * Either/both of text and pattern can be empty strings.
- *
- * Match text against a glob-style pattern, with wildcards and simple sets:
- *
- * ? matches any single character.
- * * matches any run of characters.
- * [xyz] matches a single character from the set: x, y, or z.
- * [a-d] matches a single character from the range: a, b, c, or d.
- * [a-d0-9] matches a single character from either range.
- *
- * The special characters ?, [, -, or *, can be matched using a set, eg. [*]
- * Behaviour with malformed patterns is undefined, though generally reasonable.
- *
- * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx"
- *
- * This function uses one level of recursion per '*' in pattern.
- * Since it calls _nothing_ else, and has _no_ explicit local variables,
- * this will not cause stack problems for any reasonable use here.
- *
- * RETURNS:
- * 0 on match, 1 otherwise.
- */
-static int glob_match (const char *text, const char *pattern)
-{
- do {
- /* Match single character or a '?' wildcard */
- if (*text == *pattern || *pattern == '?') {
- if (!*pattern++)
- return 0; /* End of both strings: match */
- } else {
- /* Match single char against a '[' bracketed ']' pattern set */
- if (!*text || *pattern != '[')
- break; /* Not a pattern set */
- while (*++pattern && *pattern != ']' && *text != *pattern) {
- if (*pattern == '-' && *(pattern - 1) != '[')
- if (*text > *(pattern - 1) && *text < *(pattern + 1)) {
- ++pattern;
- break;
- }
- }
- if (!*pattern || *pattern == ']')
- return 1; /* No match */
- while (*pattern && *pattern++ != ']');
- }
- } while (*++text && *pattern);
-
- /* Match any run of chars against a '*' wildcard */
- if (*pattern == '*') {
- if (!*++pattern)
- return 0; /* Match: avoid recursion at end of pattern */
- /* Loop to handle additional pattern chars after the wildcard */
- while (*text) {
- if (glob_match(text, pattern) == 0)
- return 0; /* Remainder matched */
- ++text; /* Absorb (match) this char and try again */
- }
- }
- if (!*text && !*pattern)
- return 0; /* End of both strings: match */
- return 1; /* No match */
-}
-
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -4327,10 +4261,10 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
while (ad->model_num) {
- if (!glob_match(model_num, ad->model_num)) {
+ if (glob_match(ad->model_num, model_num)) {
if (ad->model_rev == NULL)
return ad->horkage;
- if (!glob_match(model_rev, ad->model_rev))
+ if (glob_match(ad->model_rev, model_rev))
return ad->horkage;
}
ad++;
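
With the open-coded matcher gone, libata relies on the generic helper behind CONFIG_GLOB; lib/glob's glob_match() takes the pattern as its first argument and returns true on a match, which is why the negations at the call sites are dropped. A small usage sketch, assuming only <linux/glob.h>; the wrapper name is a placeholder:

#include <linux/glob.h>

static bool demo_model_matches(const char *blacklist_pattern,
			       const char *model_num)
{
	/* '?' matches one character, '*' a run, [a-d0-9] a set/range,
	 * e.g. glob_match("SD1[0-5]", "SD13") is true */
	return glob_match(blacklist_pattern, model_num);
}
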
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 72691fd93948..0586f66d70fa 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3945,7 +3945,7 @@ void ata_scsi_hotplug(struct work_struct *work)
* Zero.
*/
int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
- unsigned int id, unsigned int lun)
+ unsigned int id, u64 lun)
{
struct ata_port *ap = ata_shost_to_port(shost);
unsigned long flags;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 45b5ab3a95d5..5f4e0cca56ec 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -144,7 +144,7 @@ extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
extern void ata_scsi_dev_rescan(struct work_struct *work);
extern int ata_bus_probe(struct ata_port *ap);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
- unsigned int id, unsigned int lun);
+ unsigned int id, u64 lun);
/* libata-eh.c */
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index aa6be2698669..c39702bc279d 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -533,14 +533,13 @@ static void he_init_tx_lbfp(struct he_dev *he_dev)
static int he_init_tpdrq(struct he_dev *he_dev)
{
- he_dev->tpdrq_base = pci_alloc_consistent(he_dev->pci_dev,
- CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq), &he_dev->tpdrq_phys);
+ he_dev->tpdrq_base = pci_zalloc_consistent(he_dev->pci_dev,
+ CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq),
+ &he_dev->tpdrq_phys);
if (he_dev->tpdrq_base == NULL) {
hprintk("failed to alloc tpdrq\n");
return -ENOMEM;
}
- memset(he_dev->tpdrq_base, 0,
- CONFIG_TPDRQ_SIZE * sizeof(struct he_tpdrq));
he_dev->tpdrq_tail = he_dev->tpdrq_base;
he_dev->tpdrq_head = he_dev->tpdrq_base;
@@ -804,13 +803,13 @@ static int he_init_group(struct he_dev *he_dev, int group)
goto out_free_rbpl_virt;
}
- he_dev->rbpl_base = pci_alloc_consistent(he_dev->pci_dev,
- CONFIG_RBPL_SIZE * sizeof(struct he_rbp), &he_dev->rbpl_phys);
+ he_dev->rbpl_base = pci_zalloc_consistent(he_dev->pci_dev,
+ CONFIG_RBPL_SIZE * sizeof(struct he_rbp),
+ &he_dev->rbpl_phys);
if (he_dev->rbpl_base == NULL) {
hprintk("failed to alloc rbpl_base\n");
goto out_destroy_rbpl_pool;
}
- memset(he_dev->rbpl_base, 0, CONFIG_RBPL_SIZE * sizeof(struct he_rbp));
INIT_LIST_HEAD(&he_dev->rbpl_outstanding);
@@ -843,13 +842,13 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* rx buffer ready queue */
- he_dev->rbrq_base = pci_alloc_consistent(he_dev->pci_dev,
- CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq), &he_dev->rbrq_phys);
+ he_dev->rbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
+ CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
+ &he_dev->rbrq_phys);
if (he_dev->rbrq_base == NULL) {
hprintk("failed to allocate rbrq\n");
goto out_free_rbpl;
}
- memset(he_dev->rbrq_base, 0, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq));
he_dev->rbrq_head = he_dev->rbrq_base;
he_writel(he_dev, he_dev->rbrq_phys, G0_RBRQ_ST + (group * 16));
@@ -867,13 +866,13 @@ static int he_init_group(struct he_dev *he_dev, int group)
/* tx buffer ready queue */
- he_dev->tbrq_base = pci_alloc_consistent(he_dev->pci_dev,
- CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq), &he_dev->tbrq_phys);
+ he_dev->tbrq_base = pci_zalloc_consistent(he_dev->pci_dev,
+ CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
+ &he_dev->tbrq_phys);
if (he_dev->tbrq_base == NULL) {
hprintk("failed to allocate tbrq\n");
goto out_free_rbpq_base;
}
- memset(he_dev->tbrq_base, 0, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq));
he_dev->tbrq_head = he_dev->tbrq_base;
@@ -1460,13 +1459,13 @@ static int he_start(struct atm_dev *dev)
/* host status page */
- he_dev->hsp = pci_alloc_consistent(he_dev->pci_dev,
- sizeof(struct he_hsp), &he_dev->hsp_phys);
+ he_dev->hsp = pci_zalloc_consistent(he_dev->pci_dev,
+ sizeof(struct he_hsp),
+ &he_dev->hsp_phys);
if (he_dev->hsp == NULL) {
hprintk("failed to allocate host status page\n");
return -ENOMEM;
}
- memset(he_dev->hsp, 0, sizeof(struct he_hsp));
he_writel(he_dev, he_dev->hsp_phys, HSP_BA);
/* initialize framer */
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index b621f56a36be..2b24ed056728 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -641,13 +641,11 @@ alloc_scq(struct idt77252_dev *card, int class)
scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);
if (!scq)
return NULL;
- scq->base = pci_alloc_consistent(card->pcidev, SCQ_SIZE,
- &scq->paddr);
+ scq->base = pci_zalloc_consistent(card->pcidev, SCQ_SIZE, &scq->paddr);
if (scq->base == NULL) {
kfree(scq);
return NULL;
}
- memset(scq->base, 0, SCQ_SIZE);
scq->next = scq->base;
scq->last = scq->base + (SCQ_ENTRIES - 1);
@@ -972,13 +970,12 @@ init_rsq(struct idt77252_dev *card)
{
struct rsq_entry *rsqe;
- card->rsq.base = pci_alloc_consistent(card->pcidev, RSQSIZE,
- &card->rsq.paddr);
+ card->rsq.base = pci_zalloc_consistent(card->pcidev, RSQSIZE,
+ &card->rsq.paddr);
if (card->rsq.base == NULL) {
printk("%s: can't allocate RSQ.\n", card->name);
return -1;
}
- memset(card->rsq.base, 0, RSQSIZE);
card->rsq.last = card->rsq.base + RSQ_NUM_ENTRIES - 1;
card->rsq.next = card->rsq.last;
@@ -3400,14 +3397,14 @@ static int init_card(struct atm_dev *dev)
writel(0, SAR_REG_GP);
/* Initialize RAW Cell Handle Register */
- card->raw_cell_hnd = pci_alloc_consistent(card->pcidev, 2 * sizeof(u32),
- &card->raw_cell_paddr);
+ card->raw_cell_hnd = pci_zalloc_consistent(card->pcidev,
+ 2 * sizeof(u32),
+ &card->raw_cell_paddr);
if (!card->raw_cell_hnd) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
return -1;
}
- memset(card->raw_cell_hnd, 0, 2 * sizeof(u32));
writel(card->raw_cell_paddr, SAR_REG_RAWHND);
IPRINTK("%s: raw cell handle is at 0x%p.\n", card->name,
card->raw_cell_hnd);
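
The ATM conversions above (and the DAC960/cciss ones further down) all have the same shape: pci_alloc_consistent() plus a follow-up memset() collapses into pci_zalloc_consistent(), which returns zeroed DMA-coherent memory. A minimal before/after sketch with placeholder names:

#include <linux/pci.h>

static void *demo_alloc_ring(struct pci_dev *pdev, size_t size,
			     dma_addr_t *dma_handle)
{
	/* previously:
	 *	ring = pci_alloc_consistent(pdev, size, dma_handle);
	 *	if (ring)
	 *		memset(ring, 0, size);
	 */
	return pci_zalloc_consistent(pdev, size, dma_handle);
}
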
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 88500fed3c7a..4e7f0ff83ae7 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -289,16 +289,6 @@ config CMA_ALIGNMENT
If unsure, leave the default value "8".
-config CMA_AREAS
- int "Maximum count of the CMA device-private areas"
- default 7
- help
- CMA allows to create CMA areas for particular devices. This parameter
- sets the maximum number of such device private CMA areas in the
- system.
-
- If unsure, leave the default value "7".
-
endif
endmenu
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6467c919c509..6606abdf880c 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -24,23 +24,9 @@
#include <linux/memblock.h>
#include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/page-isolation.h>
#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
-
-struct cma {
- unsigned long base_pfn;
- unsigned long count;
- unsigned long *bitmap;
- struct mutex lock;
-};
-
-struct cma *dma_contiguous_default_area;
+#include <linux/cma.h>
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
@@ -48,6 +34,8 @@ struct cma *dma_contiguous_default_area;
#define CMA_SIZE_MBYTES 0
#endif
+struct cma *dma_contiguous_default_area;
+
/*
* Default global CMA area size can be defined in kernel's .config.
* This is useful mainly for distro maintainers to create a kernel
@@ -154,65 +142,6 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
}
}
-static DEFINE_MUTEX(cma_mutex);
-
-static int __init cma_activate_area(struct cma *cma)
-{
- int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
- unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
- unsigned i = cma->count >> pageblock_order;
- struct zone *zone;
-
- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
- if (!cma->bitmap)
- return -ENOMEM;
-
- WARN_ON_ONCE(!pfn_valid(pfn));
- zone = page_zone(pfn_to_page(pfn));
-
- do {
- unsigned j;
- base_pfn = pfn;
- for (j = pageblock_nr_pages; j; --j, pfn++) {
- WARN_ON_ONCE(!pfn_valid(pfn));
- /*
- * alloc_contig_range requires the pfn range
- * specified to be in the same zone. Make this
- * simple by forcing the entire CMA resv range
- * to be in the same zone.
- */
- if (page_zone(pfn_to_page(pfn)) != zone)
- goto err;
- }
- init_cma_reserved_pageblock(pfn_to_page(base_pfn));
- } while (--i);
-
- mutex_init(&cma->lock);
- return 0;
-
-err:
- kfree(cma->bitmap);
- return -EINVAL;
-}
-
-static struct cma cma_areas[MAX_CMA_AREAS];
-static unsigned cma_area_count;
-
-static int __init cma_init_reserved_areas(void)
-{
- int i;
-
- for (i = 0; i < cma_area_count; i++) {
- int ret = cma_activate_area(&cma_areas[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-core_initcall(cma_init_reserved_areas);
-
/**
* dma_contiguous_reserve_area() - reserve custom contiguous area
* @size: Size of the reserved area (in bytes),
@@ -234,72 +163,17 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
phys_addr_t limit, struct cma **res_cma,
bool fixed)
{
- struct cma *cma = &cma_areas[cma_area_count];
- phys_addr_t alignment;
- int ret = 0;
-
- pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
- (unsigned long)size, (unsigned long)base,
- (unsigned long)limit);
-
- /* Sanity checks */
- if (cma_area_count == ARRAY_SIZE(cma_areas)) {
- pr_err("Not enough slots for CMA reserved regions!\n");
- return -ENOSPC;
- }
-
- if (!size)
- return -EINVAL;
-
- /* Sanitise input arguments */
- alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
- base = ALIGN(base, alignment);
- size = ALIGN(size, alignment);
- limit &= ~(alignment - 1);
-
- /* Reserve memory */
- if (base && fixed) {
- if (memblock_is_region_reserved(base, size) ||
- memblock_reserve(base, size) < 0) {
- ret = -EBUSY;
- goto err;
- }
- } else {
- phys_addr_t addr = memblock_alloc_range(size, alignment, base,
- limit);
- if (!addr) {
- ret = -ENOMEM;
- goto err;
- } else {
- base = addr;
- }
- }
-
- /*
- * Each reserved area must be initialised later, when more kernel
- * subsystems (like slab allocator) are available.
- */
- cma->base_pfn = PFN_DOWN(base);
- cma->count = size >> PAGE_SHIFT;
- *res_cma = cma;
- cma_area_count++;
+ int ret;
- pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
- (unsigned long)base);
+ ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
+ if (ret)
+ return ret;
/* Architecture specific contiguous memory fixup. */
- dma_contiguous_early_fixup(base, size);
- return 0;
-err:
- pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
- return ret;
-}
+ dma_contiguous_early_fixup(cma_get_base(*res_cma),
+ cma_get_size(*res_cma));
-static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
-{
- mutex_lock(&cma->lock);
- bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
- mutex_unlock(&cma->lock);
+ return 0;
}
/**
@@ -316,62 +190,10 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
unsigned int align)
{
- unsigned long mask, pfn, pageno, start = 0;
- struct cma *cma = dev_get_cma_area(dev);
- struct page *page = NULL;
- int ret;
-
- if (!cma || !cma->count)
- return NULL;
-
if (align > CONFIG_CMA_ALIGNMENT)
align = CONFIG_CMA_ALIGNMENT;
- pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
- count, align);
-
- if (!count)
- return NULL;
-
- mask = (1 << align) - 1;
-
-
- for (;;) {
- mutex_lock(&cma->lock);
- pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
- start, count, mask);
- if (pageno >= cma->count) {
- mutex_unlock(&cma->lock);
- break;
- }
- bitmap_set(cma->bitmap, pageno, count);
- /*
- * It's safe to drop the lock here. We've marked this region for
- * our exclusive use. If the migration fails we will take the
- * lock again and unmark it.
- */
- mutex_unlock(&cma->lock);
-
- pfn = cma->base_pfn + pageno;
- mutex_lock(&cma_mutex);
- ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
- mutex_unlock(&cma_mutex);
- if (ret == 0) {
- page = pfn_to_page(pfn);
- break;
- } else if (ret != -EBUSY) {
- clear_cma_bitmap(cma, pfn, count);
- break;
- }
- clear_cma_bitmap(cma, pfn, count);
- pr_debug("%s(): memory range at %p is busy, retrying\n",
- __func__, pfn_to_page(pfn));
- /* try again with a bit different memory target */
- start = pageno + mask + 1;
- }
-
- pr_debug("%s(): returned %p\n", __func__, page);
- return page;
+ return cma_alloc(dev_get_cma_area(dev), count, align);
}
/**
@@ -387,23 +209,5 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count)
{
- struct cma *cma = dev_get_cma_area(dev);
- unsigned long pfn;
-
- if (!cma || !pages)
- return false;
-
- pr_debug("%s(page %p)\n", __func__, (void *)pages);
-
- pfn = page_to_pfn(pages);
-
- if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
- return false;
-
- VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
-
- free_contig_range(pfn, count);
- clear_cma_bitmap(cma, pfn, count);
-
- return true;
+ return cma_release(dev_get_cma_area(dev), pages, count);
}
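
After this change dma-contiguous.c is a thin shim over the generic CMA allocator in <linux/cma.h>. A hedged sketch of the three entry points as they are used above; argument meanings are inferred from the hunks, with alignment and order_per_bit simply left at 0, and the demo_* names are placeholders:

#include <linux/cma.h>

static int __init demo_reserve(phys_addr_t base, phys_addr_t size,
			       phys_addr_t limit, bool fixed,
			       struct cma **res_cma)
{
	/* carve out (fixed) or allocate (!fixed) the region at early boot */
	return cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
}

static struct page *demo_alloc(struct cma *cma, int count, unsigned int align)
{
	return cma_alloc(cma, count, align);	/* NULL on failure */
}

static bool demo_release(struct cma *cma, struct page *pages, int count)
{
	return cma_release(cma, pages, count);	/* false if not from this area */
}
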
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 89f752dd8465..a2e13e250bba 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -284,7 +284,7 @@ static int memory_subsys_online(struct device *dev)
* attribute and need to set the online_type.
*/
if (mem->online_type < 0)
- mem->online_type = ONLINE_KEEP;
+ mem->online_type = MMOP_ONLINE_KEEP;
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
@@ -315,23 +315,23 @@ store_mem_state(struct device *dev,
if (ret)
return ret;
- if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
- online_type = ONLINE_KERNEL;
- else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
- online_type = ONLINE_MOVABLE;
- else if (!strncmp(buf, "online", min_t(int, count, 6)))
- online_type = ONLINE_KEEP;
- else if (!strncmp(buf, "offline", min_t(int, count, 7)))
- online_type = -1;
+ if (sysfs_streq(buf, "online_kernel"))
+ online_type = MMOP_ONLINE_KERNEL;
+ else if (sysfs_streq(buf, "online_movable"))
+ online_type = MMOP_ONLINE_MOVABLE;
+ else if (sysfs_streq(buf, "online"))
+ online_type = MMOP_ONLINE_KEEP;
+ else if (sysfs_streq(buf, "offline"))
+ online_type = MMOP_OFFLINE;
else {
ret = -EINVAL;
goto err;
}
switch (online_type) {
- case ONLINE_KERNEL:
- case ONLINE_MOVABLE:
- case ONLINE_KEEP:
+ case MMOP_ONLINE_KERNEL:
+ case MMOP_ONLINE_MOVABLE:
+ case MMOP_ONLINE_KEEP:
/*
* mem->online_type is not protected so there can be a
* race here. However, when racing online, the first
@@ -342,7 +342,7 @@ store_mem_state(struct device *dev,
mem->online_type = online_type;
ret = device_online(&mem->dev);
break;
- case -1:
+ case MMOP_OFFLINE:
ret = device_offline(&mem->dev);
break;
default:
@@ -406,7 +406,9 @@ memory_probe_store(struct device *dev, struct device_attribute *attr,
int i, ret;
unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
- phys_addr = simple_strtoull(buf, NULL, 0);
+ ret = kstrtoull(buf, 0, &phys_addr);
+ if (ret)
+ return ret;
if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
return -EINVAL;
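
Two small parsing fixes in memory.c: sysfs_streq() replaces the counted strncmp() calls because it treats a single trailing newline (as delivered through sysfs) as equal, and kstrtoull() replaces simple_strtoull() so malformed input is rejected instead of silently accepted. A brief sketch of both helpers; the function names are placeholders:

#include <linux/string.h>
#include <linux/kernel.h>

static bool demo_is_online_request(const char *buf)
{
	/* "online" and "online\n" both compare equal */
	return sysfs_streq(buf, "online");
}

static int demo_parse_phys_addr(const char *buf, unsigned long long *addr)
{
	/* returns -EINVAL/-ERANGE on junk or overflow, 0 on success */
	return kstrtoull(buf, 0, addr);
}
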
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 8f7ed9933a7c..c6d3ae05f1ca 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -126,7 +126,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, K(node_page_state(nid, NR_FILE_PAGES)),
nid, K(node_page_state(nid, NR_FILE_MAPPED)),
nid, K(node_page_state(nid, NR_ANON_PAGES)),
- nid, K(node_page_state(nid, NR_SHMEM)),
+ nid, K(i.sharedram),
nid, node_page_state(nid, NR_KERNEL_STACK) *
THREAD_SIZE / 1024,
nid, K(node_page_state(nid, NR_PAGETABLE)),
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index bf412961a934..b67d9aef9fe4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -465,6 +465,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
* device_resume_noirq - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
@@ -594,6 +595,7 @@ static void dpm_resume_noirq(pm_message_t state)
* device_resume_early - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
@@ -1004,6 +1006,7 @@ static pm_message_t resume_event(pm_message_t sleep_state)
* device_suspend_noirq - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
@@ -1144,6 +1147,7 @@ static int dpm_suspend_noirq(pm_message_t state)
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
@@ -1298,6 +1302,7 @@ EXPORT_SYMBOL_GPL(dpm_suspend_end);
* @dev: Device to suspend.
* @state: PM transition of the system being carried out.
* @cb: Suspend callback to execute.
+ * @info: string description of caller.
*/
static int legacy_suspend(struct device *dev, pm_message_t state,
int (*cb)(struct device *dev, pm_message_t state),
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 125d84505738..811e11c82f32 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -6741,11 +6741,11 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -ENOMEM;
if (DataTransferLength > 0)
{
- DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
- DataTransferLength, &DataTransferBufferDMA);
+ DataTransferBuffer = pci_zalloc_consistent(Controller->PCIDevice,
+ DataTransferLength,
+ &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
break;
- memset(DataTransferBuffer, 0, DataTransferLength);
}
else if (DataTransferLength < 0)
{
@@ -6877,11 +6877,11 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
ErrorCode = -ENOMEM;
if (DataTransferLength > 0)
{
- DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
- DataTransferLength, &DataTransferBufferDMA);
+ DataTransferBuffer = pci_zalloc_consistent(Controller->PCIDevice,
+ DataTransferLength,
+ &DataTransferBufferDMA);
if (DataTransferBuffer == NULL)
break;
- memset(DataTransferBuffer, 0, DataTransferLength);
}
else if (DataTransferLength < 0)
{
@@ -6899,14 +6899,14 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
RequestSenseLength = UserCommand.RequestSenseLength;
if (RequestSenseLength > 0)
{
- RequestSenseBuffer = pci_alloc_consistent(Controller->PCIDevice,
- RequestSenseLength, &RequestSenseBufferDMA);
+ RequestSenseBuffer = pci_zalloc_consistent(Controller->PCIDevice,
+ RequestSenseLength,
+ &RequestSenseBufferDMA);
if (RequestSenseBuffer == NULL)
{
ErrorCode = -ENOMEM;
goto Failure2;
}
- memset(RequestSenseBuffer, 0, RequestSenseLength);
}
spin_lock_irqsave(&Controller->queue_lock, flags);
while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 4595c22f33f7..ff20f192b0f6 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1014,24 +1014,21 @@ static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
- c = (CommandList_struct *) pci_alloc_consistent(h->pdev,
- sizeof(CommandList_struct), &cmd_dma_handle);
+ c = pci_zalloc_consistent(h->pdev, sizeof(CommandList_struct),
+ &cmd_dma_handle);
if (c == NULL)
return NULL;
- memset(c, 0, sizeof(CommandList_struct));
c->cmdindex = -1;
- c->err_info = (ErrorInfo_struct *)
- pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
- &err_dma_handle);
+ c->err_info = pci_zalloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
+ &err_dma_handle);
if (c->err_info == NULL) {
pci_free_consistent(h->pdev,
sizeof(CommandList_struct), c, cmd_dma_handle);
return NULL;
}
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
INIT_LIST_HEAD(&c->list);
c->busaddr = (__u32) cmd_dma_handle;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b2c98c1bc037..623c84145b79 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -42,6 +42,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
+#include <linux/workqueue.h>
#include "rbd_types.h"
@@ -332,7 +333,10 @@ struct rbd_device {
char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
+ struct list_head rq_queue; /* incoming rq queue */
spinlock_t lock; /* queue, flags, open_count */
+ struct workqueue_struct *rq_wq;
+ struct work_struct rq_work;
struct rbd_image_header header;
unsigned long flags; /* possibly lock protected */
@@ -514,7 +518,8 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
+static int rbd_dev_header_info(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -971,12 +976,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
header->snap_names = snap_names;
header->snap_sizes = snap_sizes;
- /* Make sure mapping size is consistent with header info */
-
- if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
- if (rbd_dev->mapping.size != header->image_size)
- rbd_dev->mapping.size = header->image_size;
-
return 0;
out_2big:
ret = -EIO;
@@ -1139,6 +1138,13 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
rbd_dev->mapping.features = 0;
}
+static void rbd_segment_name_free(const char *name)
+{
+ /* The explicit cast here is needed to drop the const qualifier */
+
+ kmem_cache_free(rbd_segment_name_cache, (void *)name);
+}
+
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
char *name;
@@ -1158,20 +1164,13 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
pr_err("error formatting segment name for #%llu (%d)\n",
segment, ret);
- kfree(name);
+ rbd_segment_name_free(name);
name = NULL;
}
return name;
}
-static void rbd_segment_name_free(const char *name)
-{
- /* The explicit cast here is needed to drop the const qualifier */
-
- kmem_cache_free(rbd_segment_name_cache, (void *)name);
-}
-
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
@@ -1371,7 +1370,7 @@ static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
struct rbd_device *rbd_dev;
rbd_dev = obj_request->img_request->rbd_dev;
- rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
+ rbd_warn(rbd_dev, "obj_request %p already marked img_data",
obj_request);
}
}
@@ -1389,7 +1388,7 @@ static void obj_request_done_set(struct rbd_obj_request *obj_request)
if (obj_request_img_data_test(obj_request))
rbd_dev = obj_request->img_request->rbd_dev;
- rbd_warn(rbd_dev, "obj_request %p already marked done\n",
+ rbd_warn(rbd_dev, "obj_request %p already marked done",
obj_request);
}
}
@@ -1527,11 +1526,37 @@ static bool obj_request_type_valid(enum obj_request_type type)
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
struct rbd_obj_request *obj_request)
{
- dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
-
+ dout("%s %p\n", __func__, obj_request);
return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
+static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
+{
+ dout("%s %p\n", __func__, obj_request);
+ ceph_osdc_cancel_request(obj_request->osd_req);
+}
+
+/*
+ * Wait for an object request to complete. If interrupted, cancel the
+ * underlying osd request.
+ */
+static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
+{
+ int ret;
+
+ dout("%s %p\n", __func__, obj_request);
+
+ ret = wait_for_completion_interruptible(&obj_request->completion);
+ if (ret < 0) {
+ dout("%s %p interrupted\n", __func__, obj_request);
+ rbd_obj_request_end(obj_request);
+ return ret;
+ }
+
+ dout("%s %p done\n", __func__, obj_request);
+ return 0;
+}
+
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
@@ -1558,15 +1583,6 @@ static void rbd_img_request_complete(struct rbd_img_request *img_request)
rbd_img_request_put(img_request);
}
-/* Caller is responsible for rbd_obj_request_destroy(obj_request) */
-
-static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
-{
- dout("%s: obj %p\n", __func__, obj_request);
-
- return wait_for_completion_interruptible(&obj_request->completion);
-}
-
/*
* The default/initial value for all image request flags is 0. Each
* is conditionally set to 1 at image request initialization time
@@ -1763,7 +1779,7 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
rbd_osd_trivial_callback(obj_request);
break;
default:
- rbd_warn(NULL, "%s: unsupported op %hu\n",
+ rbd_warn(NULL, "%s: unsupported op %hu",
obj_request->object_name, (unsigned short) opcode);
break;
}
@@ -1998,7 +2014,7 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
if (!counter)
rbd_dev_unparent(rbd_dev);
else
- rbd_warn(rbd_dev, "parent reference underflow\n");
+ rbd_warn(rbd_dev, "parent reference underflow");
}
/*
@@ -2028,7 +2044,7 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
/* Image was flattened, but parent is not yet torn down */
if (counter < 0)
- rbd_warn(rbd_dev, "parent reference overflow\n");
+ rbd_warn(rbd_dev, "parent reference overflow");
return false;
}
@@ -2045,7 +2061,7 @@ static struct rbd_img_request *rbd_img_request_create(
{
struct rbd_img_request *img_request;
- img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
+ img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
if (!img_request)
return NULL;
@@ -2161,11 +2177,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
if (result) {
struct rbd_device *rbd_dev = img_request->rbd_dev;
- rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
+ rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
img_request_write_test(img_request) ? "write" : "read",
obj_request->length, obj_request->img_offset,
obj_request->offset);
- rbd_warn(rbd_dev, " result %d xferred %x\n",
+ rbd_warn(rbd_dev, " result %d xferred %x",
result, xferred);
if (!img_request->result)
img_request->result = result;
@@ -2946,154 +2962,135 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long)notify_id,
(unsigned int)opcode);
+
+ /*
+ * Until adequate refresh error handling is in place, there is
+ * not much we can do here, except warn.
+ *
+ * See http://tracker.ceph.com/issues/5040
+ */
ret = rbd_dev_refresh(rbd_dev);
if (ret)
- rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
+ rbd_warn(rbd_dev, "refresh failed: %d", ret);
- rbd_obj_notify_ack_sync(rbd_dev, notify_id);
+ ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
+ if (ret)
+ rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}
/*
- * Initiate a watch request, synchronously.
+ * Send a (un)watch request and wait for the ack. Return a request
+ * with a ref held on success or error.
*/
-static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
+static struct rbd_obj_request *rbd_obj_watch_request_helper(
+ struct rbd_device *rbd_dev,
+ bool watch)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
int ret;
- rbd_assert(!rbd_dev->watch_event);
- rbd_assert(!rbd_dev->watch_request);
-
- ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
- &rbd_dev->watch_event);
- if (ret < 0)
- return ret;
-
- rbd_assert(rbd_dev->watch_event);
-
obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
OBJ_REQUEST_NODATA);
- if (!obj_request) {
- ret = -ENOMEM;
- goto out_cancel;
- }
+ if (!obj_request)
+ return ERR_PTR(-ENOMEM);
obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
obj_request);
if (!obj_request->osd_req) {
ret = -ENOMEM;
- goto out_put;
+ goto out;
}
- ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
-
osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
- rbd_dev->watch_event->cookie, 0, 1);
+ rbd_dev->watch_event->cookie, 0, watch);
rbd_osd_req_format_write(obj_request);
+ if (watch)
+ ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
+
ret = rbd_obj_request_submit(osdc, obj_request);
if (ret)
- goto out_linger;
+ goto out;
ret = rbd_obj_request_wait(obj_request);
if (ret)
- goto out_linger;
+ goto out;
ret = obj_request->result;
- if (ret)
- goto out_linger;
-
- /*
- * A watch request is set to linger, so the underlying osd
- * request won't go away until we unregister it. We retain
- * a pointer to the object request during that time (in
- * rbd_dev->watch_request), so we'll keep a reference to
- * it. We'll drop that reference (below) after we've
- * unregistered it.
- */
- rbd_dev->watch_request = obj_request;
+ if (ret) {
+ if (watch)
+ rbd_obj_request_end(obj_request);
+ goto out;
+ }
- return 0;
+ return obj_request;
-out_linger:
- ceph_osdc_unregister_linger_request(osdc, obj_request->osd_req);
-out_put:
+out:
rbd_obj_request_put(obj_request);
-out_cancel:
- ceph_osdc_cancel_event(rbd_dev->watch_event);
- rbd_dev->watch_event = NULL;
-
- return ret;
+ return ERR_PTR(ret);
}
/*
- * Tear down a watch request, synchronously.
+ * Initiate a watch request, synchronously.
*/
-static int __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
int ret;
- rbd_assert(rbd_dev->watch_event);
- rbd_assert(rbd_dev->watch_request);
+ rbd_assert(!rbd_dev->watch_event);
+ rbd_assert(!rbd_dev->watch_request);
- obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
- OBJ_REQUEST_NODATA);
- if (!obj_request) {
- ret = -ENOMEM;
- goto out_cancel;
- }
+ ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
+ &rbd_dev->watch_event);
+ if (ret < 0)
+ return ret;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, 1,
- obj_request);
- if (!obj_request->osd_req) {
- ret = -ENOMEM;
- goto out_put;
+ obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
+ if (IS_ERR(obj_request)) {
+ ceph_osdc_cancel_event(rbd_dev->watch_event);
+ rbd_dev->watch_event = NULL;
+ return PTR_ERR(obj_request);
}
- osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
- rbd_dev->watch_event->cookie, 0, 0);
- rbd_osd_req_format_write(obj_request);
-
- ret = rbd_obj_request_submit(osdc, obj_request);
- if (ret)
- goto out_put;
+ /*
+ * A watch request is set to linger, so the underlying osd
+ * request won't go away until we unregister it. We retain
+ * a pointer to the object request during that time (in
+ * rbd_dev->watch_request), so we'll keep a reference to it.
+ * We'll drop that reference after we've unregistered it in
+ * rbd_dev_header_unwatch_sync().
+ */
+ rbd_dev->watch_request = obj_request;
- ret = rbd_obj_request_wait(obj_request);
- if (ret)
- goto out_put;
+ return 0;
+}
- ret = obj_request->result;
- if (ret)
- goto out_put;
+/*
+ * Tear down a watch request, synchronously.
+ */
+static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+{
+ struct rbd_obj_request *obj_request;
- /* We have successfully torn down the watch request */
+ rbd_assert(rbd_dev->watch_event);
+ rbd_assert(rbd_dev->watch_request);
- ceph_osdc_unregister_linger_request(osdc,
- rbd_dev->watch_request->osd_req);
+ rbd_obj_request_end(rbd_dev->watch_request);
rbd_obj_request_put(rbd_dev->watch_request);
rbd_dev->watch_request = NULL;
-out_put:
- rbd_obj_request_put(obj_request);
-out_cancel:
+ obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
+ if (!IS_ERR(obj_request))
+ rbd_obj_request_put(obj_request);
+ else
+ rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
+ PTR_ERR(obj_request));
+
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
-
- return ret;
-}
-
-static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
-{
- int ret;
-
- ret = __rbd_dev_header_unwatch_sync(rbd_dev);
- if (ret) {
- rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
- ret);
- }
}
/*
@@ -3183,102 +3180,129 @@ out:
return ret;
}
-static void rbd_request_fn(struct request_queue *q)
- __releases(q->queue_lock) __acquires(q->queue_lock)
+static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
{
- struct rbd_device *rbd_dev = q->queuedata;
- struct request *rq;
+ struct rbd_img_request *img_request;
+ u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
+ u64 length = blk_rq_bytes(rq);
+ bool wr = rq_data_dir(rq) == WRITE;
int result;
- while ((rq = blk_fetch_request(q))) {
- bool write_request = rq_data_dir(rq) == WRITE;
- struct rbd_img_request *img_request;
- u64 offset;
- u64 length;
+ /* Ignore/skip any zero-length requests */
- /* Ignore any non-FS requests that filter through. */
+ if (!length) {
+ dout("%s: zero-length request\n", __func__);
+ result = 0;
+ goto err_rq;
+ }
- if (rq->cmd_type != REQ_TYPE_FS) {
- dout("%s: non-fs request type %d\n", __func__,
- (int) rq->cmd_type);
- __blk_end_request_all(rq, 0);
- continue;
+ /* Disallow writes to a read-only device */
+
+ if (wr) {
+ if (rbd_dev->mapping.read_only) {
+ result = -EROFS;
+ goto err_rq;
}
+ rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
+ }
- /* Ignore/skip any zero-length requests */
+ /*
+ * Quit early if the mapped snapshot no longer exists. It's
+ * still possible the snapshot will have disappeared by the
+ * time our request arrives at the osd, but there's no sense in
+ * sending it if we already know.
+ */
+ if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
+ dout("request for non-existent snapshot");
+ rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
+ result = -ENXIO;
+ goto err_rq;
+ }
- offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
- length = (u64) blk_rq_bytes(rq);
+ if (offset && length > U64_MAX - offset + 1) {
+ rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
+ length);
+ result = -EINVAL;
+ goto err_rq; /* Shouldn't happen */
+ }
- if (!length) {
- dout("%s: zero-length request\n", __func__);
- __blk_end_request_all(rq, 0);
- continue;
- }
+ if (offset + length > rbd_dev->mapping.size) {
+ rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
+ length, rbd_dev->mapping.size);
+ result = -EIO;
+ goto err_rq;
+ }
- spin_unlock_irq(q->queue_lock);
+ img_request = rbd_img_request_create(rbd_dev, offset, length, wr);
+ if (!img_request) {
+ result = -ENOMEM;
+ goto err_rq;
+ }
+ img_request->rq = rq;
- /* Disallow writes to a read-only device */
+ result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, rq->bio);
+ if (result)
+ goto err_img_request;
- if (write_request) {
- result = -EROFS;
- if (rbd_dev->mapping.read_only)
- goto end_request;
- rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
- }
+ result = rbd_img_request_submit(img_request);
+ if (result)
+ goto err_img_request;
- /*
- * Quit early if the mapped snapshot no longer
- * exists. It's still possible the snapshot will
- * have disappeared by the time our request arrives
- * at the osd, but there's no sense in sending it if
- * we already know.
- */
- if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
- dout("request for non-existent snapshot");
- rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
- result = -ENXIO;
- goto end_request;
- }
+ return;
- result = -EINVAL;
- if (offset && length > U64_MAX - offset + 1) {
- rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
- offset, length);
- goto end_request; /* Shouldn't happen */
- }
+err_img_request:
+ rbd_img_request_put(img_request);
+err_rq:
+ if (result)
+ rbd_warn(rbd_dev, "%s %llx at %llx result %d",
+ wr ? "write" : "read", length, offset, result);
+ blk_end_request_all(rq, result);
+}
- result = -EIO;
- if (offset + length > rbd_dev->mapping.size) {
- rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
- offset, length, rbd_dev->mapping.size);
- goto end_request;
- }
+static void rbd_request_workfn(struct work_struct *work)
+{
+ struct rbd_device *rbd_dev =
+ container_of(work, struct rbd_device, rq_work);
+ struct request *rq, *next;
+ LIST_HEAD(requests);
- result = -ENOMEM;
- img_request = rbd_img_request_create(rbd_dev, offset, length,
- write_request);
- if (!img_request)
- goto end_request;
+ spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
+ list_splice_init(&rbd_dev->rq_queue, &requests);
+ spin_unlock_irq(&rbd_dev->lock);
- img_request->rq = rq;
+ list_for_each_entry_safe(rq, next, &requests, queuelist) {
+ list_del_init(&rq->queuelist);
+ rbd_handle_request(rbd_dev, rq);
+ }
+}
- result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
- rq->bio);
- if (!result)
- result = rbd_img_request_submit(img_request);
- if (result)
- rbd_img_request_put(img_request);
-end_request:
- spin_lock_irq(q->queue_lock);
- if (result < 0) {
- rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
- write_request ? "write" : "read",
- length, offset, result);
-
- __blk_end_request_all(rq, result);
+/*
+ * Called with q->queue_lock held and interrupts disabled, possibly on
+ * the way to schedule(). Do not sleep here!
+ */
+static void rbd_request_fn(struct request_queue *q)
+{
+ struct rbd_device *rbd_dev = q->queuedata;
+ struct request *rq;
+ int queued = 0;
+
+ rbd_assert(rbd_dev);
+
+ while ((rq = blk_fetch_request(q))) {
+ /* Ignore any non-FS requests that filter through. */
+ if (rq->cmd_type != REQ_TYPE_FS) {
+ dout("%s: non-fs request type %d\n", __func__,
+ (int) rq->cmd_type);
+ __blk_end_request_all(rq, 0);
+ continue;
}
+
+ list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
+ queued++;
}
+
+ if (queued)
+ queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work);
}
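
The rewritten rbd_request_fn() above only shuttles requests onto rbd_dev->rq_queue and kicks a workqueue, because it runs with q->queue_lock held and interrupts disabled. As a rough, generic sketch of that defer-to-workqueue pattern (the device name mydev, its fields and both function names are made up for this illustration; the queue is assumed to have been created with blk_init_queue(mydev_request_fn, &dev->lock), so queue_lock and dev->lock are the same lock):

#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct mydev {					/* hypothetical driver state */
	spinlock_t lock;			/* also passed to blk_init_queue() */
	struct list_head rq_queue;		/* requests parked for the worker */
	struct work_struct rq_work;
	struct workqueue_struct *rq_wq;
};

/* Atomic context: q->queue_lock (== dev->lock) held, IRQs off - must not sleep. */
static void mydev_request_fn(struct request_queue *q)
{
	struct mydev *dev = q->queuedata;
	struct request *rq;
	int queued = 0;

	while ((rq = blk_fetch_request(q))) {
		/* (REQ_TYPE_FS filtering omitted for brevity) */
		list_add_tail(&rq->queuelist, &dev->rq_queue);
		queued++;
	}
	if (queued)
		queue_work(dev->rq_wq, &dev->rq_work);
}

/* Process context: grab the parked requests and handle them; sleeping is allowed. */
static void mydev_workfn(struct work_struct *work)
{
	struct mydev *dev = container_of(work, struct mydev, rq_work);
	struct request *rq, *next;
	LIST_HEAD(requests);

	spin_lock_irq(&dev->lock);
	list_splice_init(&dev->rq_queue, &requests);
	spin_unlock_irq(&dev->lock);

	list_for_each_entry_safe(rq, next, &requests, queuelist) {
		list_del_init(&rq->queuelist);
		/* ...issue the I/O, may block... */
		blk_end_request_all(rq, 0);
	}
}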
/*
@@ -3517,24 +3541,37 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
u64 mapping_size;
int ret;
- rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
down_write(&rbd_dev->header_rwsem);
mapping_size = rbd_dev->mapping.size;
- if (rbd_dev->image_format == 1)
- ret = rbd_dev_v1_header_info(rbd_dev);
- else
- ret = rbd_dev_v2_header_info(rbd_dev);
- /* If it's a mapped snapshot, validate its EXISTS flag */
+ ret = rbd_dev_header_info(rbd_dev);
+ if (ret)
+ return ret;
+
+ /*
+ * If there is a parent, see if it has disappeared due to the
+ * mapped image getting flattened.
+ */
+ if (rbd_dev->parent) {
+ ret = rbd_dev_v2_parent_info(rbd_dev);
+ if (ret)
+ return ret;
+ }
+
+ if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
+ if (rbd_dev->mapping.size != rbd_dev->header.image_size)
+ rbd_dev->mapping.size = rbd_dev->header.image_size;
+ } else {
+ /* validate mapped snapshot's EXISTS flag */
+ rbd_exists_validate(rbd_dev);
+ }
- rbd_exists_validate(rbd_dev);
up_write(&rbd_dev->header_rwsem);
- if (mapping_size != rbd_dev->mapping.size) {
+ if (mapping_size != rbd_dev->mapping.size)
rbd_dev_update_size(rbd_dev);
- }
- return ret;
+ return 0;
}
static int rbd_init_disk(struct rbd_device *rbd_dev)
@@ -3696,46 +3733,36 @@ static ssize_t rbd_snap_show(struct device *dev,
}
/*
- * For an rbd v2 image, shows the pool id, image id, and snapshot id
- * for the parent image. If there is no parent, simply shows
- * "(no parent image)".
+ * For a v2 image, shows the chain of parent images, separated by empty
+ * lines. For v1 images or if there is no parent, shows "(no parent
+ * image)".
*/
static ssize_t rbd_parent_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
- struct rbd_spec *spec = rbd_dev->parent_spec;
- int count;
- char *bufp = buf;
+ ssize_t count = 0;
- if (!spec)
+ if (!rbd_dev->parent)
return sprintf(buf, "(no parent image)\n");
- count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
- (unsigned long long) spec->pool_id, spec->pool_name);
- if (count < 0)
- return count;
- bufp += count;
-
- count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
- spec->image_name ? spec->image_name : "(unknown)");
- if (count < 0)
- return count;
- bufp += count;
-
- count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
- (unsigned long long) spec->snap_id, spec->snap_name);
- if (count < 0)
- return count;
- bufp += count;
-
- count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
- if (count < 0)
- return count;
- bufp += count;
+ for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
+ struct rbd_spec *spec = rbd_dev->parent_spec;
+
+ count += sprintf(&buf[count], "%s"
+ "pool_id %llu\npool_name %s\n"
+ "image_id %s\nimage_name %s\n"
+ "snap_id %llu\nsnap_name %s\n"
+ "overlap %llu\n",
+ !count ? "" : "\n", /* first? */
+ spec->pool_id, spec->pool_name,
+ spec->image_id, spec->image_name ?: "(unknown)",
+ spec->snap_id, spec->snap_name,
+ rbd_dev->parent_overlap);
+ }
- return (ssize_t) (bufp - buf);
+ return count;
}
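
For illustration, reading the parent sysfs attribute of a clone whose parent is itself a clone would, with the loop above, print one block per ancestor separated by an empty line, along these lines (all ids, names and overlap values below are invented):

pool_id 2
pool_name rbd
image_id 1014f2ae8944a
image_name base-image
snap_id 10
snap_name snap1
overlap 4194304

pool_id 2
pool_name rbd
image_id 100b3d1b58ba
image_name golden-image
snap_id 4
snap_name snap0
overlap 4194304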
static ssize_t rbd_image_refresh(struct device *dev,
@@ -3748,9 +3775,9 @@ static ssize_t rbd_image_refresh(struct device *dev,
ret = rbd_dev_refresh(rbd_dev);
if (ret)
- rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
+ return ret;
- return ret < 0 ? ret : size;
+ return size;
}
static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
@@ -3822,6 +3849,9 @@ static struct rbd_spec *rbd_spec_alloc(void)
spec = kzalloc(sizeof (*spec), GFP_KERNEL);
if (!spec)
return NULL;
+
+ spec->pool_id = CEPH_NOPOOL;
+ spec->snap_id = CEPH_NOSNAP;
kref_init(&spec->kref);
return spec;
@@ -3848,6 +3878,8 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
return NULL;
spin_lock_init(&rbd_dev->lock);
+ INIT_LIST_HEAD(&rbd_dev->rq_queue);
+ INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
rbd_dev->flags = 0;
atomic_set(&rbd_dev->parent_ref, 0);
INIT_LIST_HEAD(&rbd_dev->node);
@@ -4021,7 +4053,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
goto out_err;
}
- snapid = cpu_to_le64(CEPH_NOSNAP);
+ snapid = cpu_to_le64(rbd_dev->spec->snap_id);
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
"rbd", "get_parent",
&snapid, sizeof (snapid),
@@ -4059,7 +4091,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
ret = -EIO;
if (pool_id > (u64)U32_MAX) {
- rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
+ rbd_warn(NULL, "parent pool id too large (%llu > %u)",
(unsigned long long)pool_id, U32_MAX);
goto out_err;
}
@@ -4083,6 +4115,8 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
parent_spec->snap_id = snap_id;
rbd_dev->parent_spec = parent_spec;
parent_spec = NULL; /* rbd_dev now owns this */
+ } else {
+ kfree(image_id);
}
/*
@@ -4110,8 +4144,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* overlap is zero we just pretend there was
* no parent image.
*/
- rbd_warn(rbd_dev, "ignoring parent of "
- "clone with overlap 0\n");
+ rbd_warn(rbd_dev, "ignoring parent with overlap 0");
}
}
out:
@@ -4279,18 +4312,38 @@ static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
}
/*
- * When an rbd image has a parent image, it is identified by the
- * pool, image, and snapshot ids (not names). This function fills
- * in the names for those ids. (It's OK if we can't figure out the
- * name for an image id, but the pool and snapshot ids should always
- * exist and have names.) All names in an rbd spec are dynamically
- * allocated.
+ * An image being mapped will have everything but the snap id.
+ */
+static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
+{
+ struct rbd_spec *spec = rbd_dev->spec;
+
+ rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
+ rbd_assert(spec->image_id && spec->image_name);
+ rbd_assert(spec->snap_name);
+
+ if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
+ u64 snap_id;
+
+ snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
+ if (snap_id == CEPH_NOSNAP)
+ return -ENOENT;
+
+ spec->snap_id = snap_id;
+ } else {
+ spec->snap_id = CEPH_NOSNAP;
+ }
+
+ return 0;
+}
+
+/*
+ * A parent image will have all ids but none of the names.
*
- * When an image being mapped (not a parent) is probed, we have the
- * pool name and pool id, image name and image id, and the snapshot
- * name. The only thing we're missing is the snapshot id.
+ * All names in an rbd spec are dynamically allocated. It's OK if we
+ * can't figure out the name for an image id.
*/
-static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
+static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_spec *spec = rbd_dev->spec;
@@ -4299,24 +4352,9 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
const char *snap_name;
int ret;
- /*
- * An image being mapped will have the pool name (etc.), but
- * we need to look up the snapshot id.
- */
- if (spec->pool_name) {
- if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
- u64 snap_id;
-
- snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
- if (snap_id == CEPH_NOSNAP)
- return -ENOENT;
- spec->snap_id = snap_id;
- } else {
- spec->snap_id = CEPH_NOSNAP;
- }
-
- return 0;
- }
+ rbd_assert(spec->pool_id != CEPH_NOPOOL);
+ rbd_assert(spec->image_id);
+ rbd_assert(spec->snap_id != CEPH_NOSNAP);
/* Get the pool name; we have to make our own copy of this */
@@ -4335,7 +4373,7 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
if (!image_name)
rbd_warn(rbd_dev, "unable to get image name");
- /* Look up the snapshot name, and make a copy */
+ /* Fetch the snapshot name */
snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
if (IS_ERR(snap_name)) {
@@ -4348,10 +4386,10 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
spec->snap_name = snap_name;
return 0;
+
out_err:
kfree(image_name);
kfree(pool_name);
-
return ret;
}
@@ -4483,43 +4521,22 @@ static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
return ret;
}
- /*
- * If the image supports layering, get the parent info. We
- * need to probe the first time regardless. Thereafter we
- * only need to if there's a parent, to see if it has
- * disappeared due to the mapped image getting flattened.
- */
- if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
- (first_time || rbd_dev->parent_spec)) {
- bool warn;
-
- ret = rbd_dev_v2_parent_info(rbd_dev);
- if (ret)
- return ret;
-
- /*
- * Print a warning if this is the initial probe and
- * the image has a parent. Don't print it if the
- * image now being probed is itself a parent. We
- * can tell at this point because we won't know its
- * pool name yet (just its pool id).
- */
- warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
- if (first_time && warn)
- rbd_warn(rbd_dev, "WARNING: kernel layering "
- "is EXPERIMENTAL!");
- }
-
- if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
- if (rbd_dev->mapping.size != rbd_dev->header.image_size)
- rbd_dev->mapping.size = rbd_dev->header.image_size;
-
ret = rbd_dev_v2_snap_context(rbd_dev);
dout("rbd_dev_v2_snap_context returned %d\n", ret);
return ret;
}
+static int rbd_dev_header_info(struct rbd_device *rbd_dev)
+{
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+
+ if (rbd_dev->image_format == 1)
+ return rbd_dev_v1_header_info(rbd_dev);
+
+ return rbd_dev_v2_header_info(rbd_dev);
+}
+
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
struct device *dev;
@@ -5066,12 +5083,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
ret = rbd_dev_mapping_set(rbd_dev);
if (ret)
goto err_out_disk;
+
set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
+ rbd_dev->rq_wq = alloc_workqueue(rbd_dev->disk->disk_name, 0, 0);
+ if (!rbd_dev->rq_wq)
+ goto err_out_mapping;
+
ret = rbd_bus_add_dev(rbd_dev);
if (ret)
- goto err_out_mapping;
+ goto err_out_workqueue;
/* Everything's ready. Announce the disk to the world. */
@@ -5083,6 +5105,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
return ret;
+err_out_workqueue:
+ destroy_workqueue(rbd_dev->rq_wq);
+ rbd_dev->rq_wq = NULL;
err_out_mapping:
rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
@@ -5155,8 +5180,6 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
ret = rbd_dev_image_id(rbd_dev);
if (ret)
return ret;
- rbd_assert(rbd_dev->spec->image_id);
- rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
ret = rbd_dev_header_name(rbd_dev);
if (ret)
@@ -5168,25 +5191,45 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
goto out_header_name;
}
- if (rbd_dev->image_format == 1)
- ret = rbd_dev_v1_header_info(rbd_dev);
- else
- ret = rbd_dev_v2_header_info(rbd_dev);
+ ret = rbd_dev_header_info(rbd_dev);
if (ret)
goto err_out_watch;
- ret = rbd_dev_spec_update(rbd_dev);
+ /*
+ * If this image is the one being mapped, we have pool name and
+ * id, image name and id, and snap name - need to fill snap id.
+ * Otherwise this is a parent image, identified by pool, image
+ * and snap ids - need to fill in names for those ids.
+ */
+ if (mapping)
+ ret = rbd_spec_fill_snap_id(rbd_dev);
+ else
+ ret = rbd_spec_fill_names(rbd_dev);
if (ret)
goto err_out_probe;
+ if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
+ ret = rbd_dev_v2_parent_info(rbd_dev);
+ if (ret)
+ goto err_out_probe;
+
+ /*
+ * Need to warn users if this image is the one being
+ * mapped and has a parent.
+ */
+ if (mapping && rbd_dev->parent_spec)
+ rbd_warn(rbd_dev,
+ "WARNING: kernel layering is EXPERIMENTAL!");
+ }
+
ret = rbd_dev_probe_parent(rbd_dev);
if (ret)
goto err_out_probe;
dout("discovered format %u image, header name is %s\n",
rbd_dev->image_format, rbd_dev->header_name);
-
return 0;
+
err_out_probe:
rbd_dev_unprobe(rbd_dev);
err_out_watch:
@@ -5199,9 +5242,6 @@ err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
-
- dout("probe failed, returning %d\n", ret);
-
return ret;
}
@@ -5243,7 +5283,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
/* The ceph file layout needs to fit pool id in 32 bits */
if (spec->pool_id > (u64)U32_MAX) {
- rbd_warn(NULL, "pool id too large (%llu > %u)\n",
+ rbd_warn(NULL, "pool id too large (%llu > %u)",
(unsigned long long)spec->pool_id, U32_MAX);
rc = -EIO;
goto err_out_client;
@@ -5314,6 +5354,7 @@ static void rbd_dev_device_release(struct device *dev)
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ destroy_workqueue(rbd_dev->rq_wq);
rbd_free_disk(rbd_dev);
clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
rbd_dev_mapping_clear(rbd_dev);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 608532d3f8c9..f0a089df85cc 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -4112,16 +4112,14 @@ static int skd_cons_skcomp(struct skd_device *skdev)
skdev->name, __func__, __LINE__,
nbytes, SKD_N_COMPLETION_ENTRY);
- skcomp = pci_alloc_consistent(skdev->pdev, nbytes,
- &skdev->cq_dma_address);
+ skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
+ &skdev->cq_dma_address);
if (skcomp == NULL) {
rc = -ENOMEM;
goto err_out;
}
- memset(skcomp, 0, nbytes);
-
skdev->skcomp_table = skcomp;
skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
sizeof(*skcomp) *
@@ -4304,15 +4302,14 @@ static int skd_cons_skspcl(struct skd_device *skdev)
nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
+ skspcl->msg_buf =
+ pci_zalloc_consistent(skdev->pdev, nbytes,
+ &skspcl->mb_dma_address);
if (skspcl->msg_buf == NULL) {
rc = -ENOMEM;
goto err_out;
}
- memset(skspcl->msg_buf, 0, nbytes);
-
skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
if (skspcl->req.sg == NULL) {
@@ -4353,25 +4350,21 @@ static int skd_cons_sksb(struct skd_device *skdev)
nbytes = SKD_N_INTERNAL_BYTES;
- skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes,
- &skspcl->db_dma_address);
+ skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
+ &skspcl->db_dma_address);
if (skspcl->data_buf == NULL) {
rc = -ENOMEM;
goto err_out;
}
- memset(skspcl->data_buf, 0, nbytes);
-
nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
+ skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
+ &skspcl->mb_dma_address);
if (skspcl->msg_buf == NULL) {
rc = -ENOMEM;
goto err_out;
}
- memset(skspcl->msg_buf, 0, nbytes);
-
skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
&skspcl->req.sksg_dma_address);
if (skspcl->req.sksg_list == NULL) {
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 36e54be402df..dfa4024c448a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -183,19 +183,32 @@ static ssize_t comp_algorithm_store(struct device *dev,
static int zram_test_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- return meta->table[index].flags & BIT(flag);
+ return meta->table[index].value & BIT(flag);
}
static void zram_set_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- meta->table[index].flags |= BIT(flag);
+ meta->table[index].value |= BIT(flag);
}
static void zram_clear_flag(struct zram_meta *meta, u32 index,
enum zram_pageflags flag)
{
- meta->table[index].flags &= ~BIT(flag);
+ meta->table[index].value &= ~BIT(flag);
+}
+
+static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+{
+ return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+}
+
+static void zram_set_obj_size(struct zram_meta *meta,
+ u32 index, size_t size)
+{
+ unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
+
+ meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
static inline int is_partial_io(struct bio_vec *bvec)
@@ -255,7 +268,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
goto free_table;
}
- rwlock_init(&meta->tb_lock);
return meta;
free_table:
@@ -304,7 +316,12 @@ static void handle_zero_page(struct bio_vec *bvec)
flush_dcache_page(page);
}
-/* NOTE: caller should hold meta->tb_lock with write-side */
+
+/*
+ * To protect concurrent access to the same index entry, the caller
+ * should hold that table entry's bit spinlock (the ZRAM_ACCESS bit)
+ * while accessing it.
+ */
static void zram_free_page(struct zram *zram, size_t index)
{
struct zram_meta *meta = zram->meta;
@@ -324,11 +341,12 @@ static void zram_free_page(struct zram *zram, size_t index)
zs_free(meta->mem_pool, handle);
- atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
+ atomic64_sub(zram_get_obj_size(meta, index),
+ &zram->stats.compr_data_size);
atomic64_dec(&zram->stats.pages_stored);
meta->table[index].handle = 0;
- meta->table[index].size = 0;
+ zram_set_obj_size(meta, index, 0);
}
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
@@ -337,14 +355,14 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
unsigned char *cmem;
struct zram_meta *meta = zram->meta;
unsigned long handle;
- u16 size;
+ size_t size;
- read_lock(&meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
handle = meta->table[index].handle;
- size = meta->table[index].size;
+ size = zram_get_obj_size(meta, index);
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
- read_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
clear_page(mem);
return 0;
}
@@ -355,7 +373,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
else
ret = zcomp_decompress(zram->comp, cmem, size, mem);
zs_unmap_object(meta->mem_pool, handle);
- read_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret)) {
@@ -376,14 +394,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- read_lock(&meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
if (unlikely(!meta->table[index].handle) ||
zram_test_flag(meta, index, ZRAM_ZERO)) {
- read_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
handle_zero_page(bvec);
return 0;
}
- read_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
@@ -461,10 +479,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
- write_lock(&zram->meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
zram_set_flag(meta, index, ZRAM_ZERO);
- write_unlock(&zram->meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
atomic64_inc(&zram->stats.zero_pages);
ret = 0;
@@ -514,12 +532,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
* Free memory associated with this sector
* before overwriting unused sectors.
*/
- write_lock(&zram->meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
meta->table[index].handle = handle;
- meta->table[index].size = clen;
- write_unlock(&zram->meta->tb_lock);
+ zram_set_obj_size(meta, index, clen);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
/* Update stats */
atomic64_add(clen, &zram->stats.compr_data_size);
@@ -560,6 +578,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
int offset, struct bio *bio)
{
size_t n = bio->bi_iter.bi_size;
+ struct zram_meta *meta = zram->meta;
/*
* zram manages data in physical block size units. Because logical block
@@ -580,13 +599,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
while (n >= PAGE_SIZE) {
- /*
- * Discard request can be large so the lock hold times could be
- * lengthy. So take the lock once per page.
- */
- write_lock(&zram->meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
- write_unlock(&zram->meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
index++;
n -= PAGE_SIZE;
}
@@ -821,9 +836,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram = bdev->bd_disk->private_data;
meta = zram->meta;
- write_lock(&meta->tb_lock);
+ bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
zram_free_page(zram, index);
- write_unlock(&meta->tb_lock);
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
atomic64_inc(&zram->stats.notify_free);
}
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 7f21c145e317..5b0afde729cd 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -43,7 +43,6 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/*-- End of configurable params */
#define SECTOR_SHIFT 9
-#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#define SECTORS_PER_PAGE_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)
#define ZRAM_LOGICAL_BLOCK_SHIFT 12
@@ -51,10 +50,24 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK \
(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
-/* Flags for zram pages (table[page_no].flags) */
+
+/*
+ * zram is mainly used for memory efficiency, so we want to keep the
+ * per-entry memory footprint small by squeezing the object size and
+ * the flags into a single field: the lower ZRAM_FLAG_SHIFT bits of
+ * table.value hold the object size (excluding header), and the higher
+ * bits hold the zram_pageflags.
+ */
+#define ZRAM_FLAG_SHIFT 24
+
+/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
/* Page consists entirely of zeros */
- ZRAM_ZERO,
+ ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
+	ZRAM_ACCESS,	/* page is being accessed */
__NR_ZRAM_PAGEFLAGS,
};
@@ -62,11 +75,10 @@ enum zram_pageflags {
/*-- Data structures */
/* Allocated for each disk page */
-struct table {
+struct zram_table_entry {
unsigned long handle;
- u16 size; /* object size (excluding header) */
- u8 flags;
-} __aligned(4);
+ unsigned long value;
+};
struct zram_stats {
atomic64_t compr_data_size; /* compressed size of pages stored */
@@ -81,8 +93,7 @@ struct zram_stats {
};
struct zram_meta {
- rwlock_t tb_lock; /* protect table */
- struct table *table;
+ struct zram_table_entry *table;
struct zs_pool *mem_pool;
};
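
To make the new packed encoding concrete, here is a minimal userspace-style sketch (only the ZRAM_FLAG_SHIFT value and the flag bit positions are taken from the header above; everything else is made up for illustration): a 3000-byte object occupies the low 24 bits of value, while ZRAM_ZERO (bit 25) and the ZRAM_ACCESS bit spinlock (bit 26) live above them.

#include <assert.h>

#define ZRAM_FLAG_SHIFT	24
#define ZRAM_ZERO	(ZRAM_FLAG_SHIFT + 1)	/* bit 25 */
#define ZRAM_ACCESS	(ZRAM_FLAG_SHIFT + 2)	/* bit 26, doubles as the bit spinlock */

int main(void)
{
	unsigned long value = 0;
	unsigned long size = 3000;		/* compressed object size */

	/* set the size, preserving whatever flag bits are already set */
	value = (value >> ZRAM_FLAG_SHIFT << ZRAM_FLAG_SHIFT) | size;
	value |= 1UL << ZRAM_ACCESS;		/* entry "locked" */

	assert((value & ((1UL << ZRAM_FLAG_SHIFT) - 1)) == 3000);
	assert(value & (1UL << ZRAM_ACCESS));	/* access bit set */
	assert(!(value & (1UL << ZRAM_ZERO)));	/* not a zero page */
	return 0;
}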
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1f37d9870e7a..603eb1be4f6a 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -50,6 +50,14 @@ config ARM_CCI
Driver supporting the CCI cache coherent interconnect for ARM
platforms.
+config ARM_CCN
+ bool "ARM CCN driver support"
+ depends on ARM || ARM64
+ depends on PERF_EVENTS
+ help
+ PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
+ interconnect.
+
config VEXPRESS_CONFIG
bool "Versatile Express configuration bus"
default y if ARCH_VEXPRESS
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 6a4ea7e4af1a..2973c18cbcc2 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -9,7 +9,9 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
# Interconnect bus driver for OMAP SoCs.
obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o
-# CCI cache coherent interconnect for ARM platforms
+
+# Interconnect bus drivers for ARM platforms
obj-$(CONFIG_ARM_CCI) += arm-cci.o
+obj-$(CONFIG_ARM_CCN) += arm-ccn.o
obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 5a86da97a70b..7af78df241f2 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -397,7 +397,8 @@ static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
hw_counter = &event->hw;
/* Did this counter overflow? */
- if (!pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG)
+ if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) &
+ CCI_PMU_OVRFLW_FLAG))
continue;
pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
new file mode 100644
index 000000000000..3266f8ff9311
--- /dev/null
+++ b/drivers/bus/arm-ccn.c
@@ -0,0 +1,1391 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2014 ARM Limited
+ */
+
+#include <linux/ctype.h>
+#include <linux/hrtimer.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CCN_NUM_XP_PORTS 2
+#define CCN_NUM_VCS 4
+#define CCN_NUM_REGIONS 256
+#define CCN_REGION_SIZE 0x10000
+
+#define CCN_ALL_OLY_ID 0xff00
+#define CCN_ALL_OLY_ID__OLY_ID__SHIFT 0
+#define CCN_ALL_OLY_ID__OLY_ID__MASK 0x1f
+#define CCN_ALL_OLY_ID__NODE_ID__SHIFT 8
+#define CCN_ALL_OLY_ID__NODE_ID__MASK 0x3f
+
+#define CCN_MN_ERRINT_STATUS 0x0008
+#define CCN_MN_ERRINT_STATUS__INTREQ__DESSERT 0x11
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE 0x02
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED 0x20
+#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE 0x22
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE 0x04
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED 0x40
+#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE 0x44
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE 0x08
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED 0x80
+#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE 0x88
+#define CCN_MN_OLY_COMP_LIST_63_0 0x01e0
+#define CCN_MN_ERR_SIG_VAL_63_0 0x0300
+#define CCN_MN_ERR_SIG_VAL_63_0__DT (1 << 1)
+
+#define CCN_DT_ACTIVE_DSM 0x0000
+#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n) ((n) * 8)
+#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK 0xff
+#define CCN_DT_CTL 0x0028
+#define CCN_DT_CTL__DT_EN (1 << 0)
+#define CCN_DT_PMEVCNT(n) (0x0100 + (n) * 0x8)
+#define CCN_DT_PMCCNTR 0x0140
+#define CCN_DT_PMCCNTRSR 0x0190
+#define CCN_DT_PMOVSR 0x0198
+#define CCN_DT_PMOVSR_CLR 0x01a0
+#define CCN_DT_PMCR 0x01a8
+#define CCN_DT_PMCR__OVFL_INTR_EN (1 << 6)
+#define CCN_DT_PMCR__PMU_EN (1 << 0)
+#define CCN_DT_PMSR 0x01b0
+#define CCN_DT_PMSR_REQ 0x01b8
+#define CCN_DT_PMSR_CLR 0x01c0
+
+#define CCN_HNF_PMU_EVENT_SEL 0x0600
+#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
+#define CCN_HNF_PMU_EVENT_SEL__ID__MASK 0xf
+
+#define CCN_XP_DT_CONFIG 0x0300
+#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n) ((n) * 4)
+#define CCN_XP_DT_CONFIG__DT_CFG__MASK 0xf
+#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH 0x0
+#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1 0x1
+#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n) (0x2 + (n))
+#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n) (0x4 + (n))
+#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
+#define CCN_XP_DT_INTERFACE_SEL 0x0308
+#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n) (0 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK 0x1
+#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n) (1 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK 0x1
+#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n) (2 + (n) * 8)
+#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK 0x3
+#define CCN_XP_DT_CMP_VAL_L(n) (0x0310 + (n) * 0x40)
+#define CCN_XP_DT_CMP_VAL_H(n) (0x0318 + (n) * 0x40)
+#define CCN_XP_DT_CMP_MASK_L(n) (0x0320 + (n) * 0x40)
+#define CCN_XP_DT_CMP_MASK_H(n) (0x0328 + (n) * 0x40)
+#define CCN_XP_DT_CONTROL 0x0370
+#define CCN_XP_DT_CONTROL__DT_ENABLE (1 << 0)
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n) (12 + (n) * 4)
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK 0xf
+#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS 0xf
+#define CCN_XP_PMU_EVENT_SEL 0x0600
+#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 7)
+#define CCN_XP_PMU_EVENT_SEL__ID__MASK 0x3f
+
+#define CCN_SBAS_PMU_EVENT_SEL 0x0600
+#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
+#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK 0xf
+
+#define CCN_RNI_PMU_EVENT_SEL 0x0600
+#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n) ((n) * 4)
+#define CCN_RNI_PMU_EVENT_SEL__ID__MASK 0xf
+
+#define CCN_TYPE_MN 0x01
+#define CCN_TYPE_DT 0x02
+#define CCN_TYPE_HNF 0x04
+#define CCN_TYPE_HNI 0x05
+#define CCN_TYPE_XP 0x08
+#define CCN_TYPE_SBSX 0x0c
+#define CCN_TYPE_SBAS 0x10
+#define CCN_TYPE_RNI_1P 0x14
+#define CCN_TYPE_RNI_2P 0x15
+#define CCN_TYPE_RNI_3P 0x16
+#define CCN_TYPE_RND_1P 0x18 /* RN-D = RN-I + DVM */
+#define CCN_TYPE_RND_2P 0x19
+#define CCN_TYPE_RND_3P 0x1a
+#define CCN_TYPE_CYCLES 0xff /* Pseudotype */
+
+#define CCN_EVENT_WATCHPOINT 0xfe /* Pseudoevent */
+
+#define CCN_NUM_PMU_EVENTS 4
+#define CCN_NUM_XP_WATCHPOINTS 2 /* See DT.dbg_id.num_watchpoints */
+#define CCN_NUM_PMU_EVENT_COUNTERS 8 /* See DT.dbg_id.num_pmucntr */
+#define CCN_IDX_PMU_CYCLE_COUNTER CCN_NUM_PMU_EVENT_COUNTERS
+
+#define CCN_NUM_PREDEFINED_MASKS 4
+#define CCN_IDX_MASK_ANY (CCN_NUM_PMU_EVENT_COUNTERS + 0)
+#define CCN_IDX_MASK_EXACT (CCN_NUM_PMU_EVENT_COUNTERS + 1)
+#define CCN_IDX_MASK_ORDER (CCN_NUM_PMU_EVENT_COUNTERS + 2)
+#define CCN_IDX_MASK_OPCODE (CCN_NUM_PMU_EVENT_COUNTERS + 3)
+
+struct arm_ccn_component {
+ void __iomem *base;
+ u32 type;
+
+ DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
+ union {
+ struct {
+ DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
+ } xp;
+ };
+};
+
+#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
+ struct arm_ccn_dt, pmu), struct arm_ccn, dt)
+
+struct arm_ccn_dt {
+ int id;
+ void __iomem *base;
+
+ spinlock_t config_lock;
+
+ DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
+ struct {
+ struct arm_ccn_component *source;
+ struct perf_event *event;
+ } pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];
+
+ struct {
+ u64 l, h;
+ } cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];
+
+ struct hrtimer hrtimer;
+
+ struct pmu pmu;
+};
+
+struct arm_ccn {
+ struct device *dev;
+ void __iomem *base;
+ unsigned irq_used:1;
+ unsigned sbas_present:1;
+ unsigned sbsx_present:1;
+
+ int num_nodes;
+ struct arm_ccn_component *node;
+
+ int num_xps;
+ struct arm_ccn_component *xp;
+
+ struct arm_ccn_dt dt;
+};
+
+
+static int arm_ccn_node_to_xp(int node)
+{
+ return node / CCN_NUM_XP_PORTS;
+}
+
+static int arm_ccn_node_to_xp_port(int node)
+{
+ return node % CCN_NUM_XP_PORTS;
+}
+
+
+/*
+ * Bit shifts and masks in these defines must be kept in sync with
+ * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
+ */
+#define CCN_CONFIG_NODE(_config) (((_config) >> 0) & 0xff)
+#define CCN_CONFIG_XP(_config) (((_config) >> 0) & 0xff)
+#define CCN_CONFIG_TYPE(_config) (((_config) >> 8) & 0xff)
+#define CCN_CONFIG_EVENT(_config) (((_config) >> 16) & 0xff)
+#define CCN_CONFIG_PORT(_config) (((_config) >> 24) & 0x3)
+#define CCN_CONFIG_VC(_config) (((_config) >> 26) & 0x7)
+#define CCN_CONFIG_DIR(_config) (((_config) >> 29) & 0x1)
+#define CCN_CONFIG_MASK(_config) (((_config) >> 30) & 0xf)
+
+static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
+{
+ *config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24));
+ *config |= (node_xp << 0) | (type << 8) | (port << 24);
+}
+
+static ssize_t arm_ccn_pmu_format_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *ea = container_of(attr,
+ struct dev_ext_attribute, attr);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
+}
+
+#define CCN_FORMAT_ATTR(_name, _config) \
+ struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
+ { __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
+ NULL), _config }
+
+static CCN_FORMAT_ATTR(node, "config:0-7");
+static CCN_FORMAT_ATTR(xp, "config:0-7");
+static CCN_FORMAT_ATTR(type, "config:8-15");
+static CCN_FORMAT_ATTR(event, "config:16-23");
+static CCN_FORMAT_ATTR(port, "config:24-25");
+static CCN_FORMAT_ATTR(vc, "config:26-28");
+static CCN_FORMAT_ATTR(dir, "config:29-29");
+static CCN_FORMAT_ATTR(mask, "config:30-33");
+static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
+static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");
+
+static struct attribute *arm_ccn_pmu_format_attrs[] = {
+ &arm_ccn_pmu_format_attr_node.attr.attr,
+ &arm_ccn_pmu_format_attr_xp.attr.attr,
+ &arm_ccn_pmu_format_attr_type.attr.attr,
+ &arm_ccn_pmu_format_attr_event.attr.attr,
+ &arm_ccn_pmu_format_attr_port.attr.attr,
+ &arm_ccn_pmu_format_attr_vc.attr.attr,
+ &arm_ccn_pmu_format_attr_dir.attr.attr,
+ &arm_ccn_pmu_format_attr_mask.attr.attr,
+ &arm_ccn_pmu_format_attr_cmp_l.attr.attr,
+ &arm_ccn_pmu_format_attr_cmp_h.attr.attr,
+ NULL
+};
+
+static struct attribute_group arm_ccn_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = arm_ccn_pmu_format_attrs,
+};
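
These format attributes are the standard perf_event sysfs description of how config/config1/config2 are laid out, so events can be requested from userspace with the usual pmu/field=value,.../ syntax. A hypothetical invocation (the PMU instance name under /sys/bus/event_source/devices/ is assigned when the PMU is registered, outside this hunk, so <ccn-pmu> below is only a placeholder; -a is needed because the driver rejects per-task events) might look like:

perf stat -a -e <ccn-pmu>/type=0x8,xp=0,port=1,vc=1,dir=0,event=0x4/ sleep 1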
+
+
+struct arm_ccn_pmu_event {
+ struct device_attribute attr;
+ u32 type;
+ u32 event;
+ int num_ports;
+ int num_vcs;
+ const char *def;
+ int mask;
+};
+
+#define CCN_EVENT_ATTR(_name) \
+ __ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)
+
+/*
+ * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on
+ * their ports in XP they are connected to. For the sake of usability they are
+ * explicitly defined here (and translated into a relevant watchpoint in
+ * arm_ccn_pmu_event_init()) so the user can easily request them without deep
+ * knowledge of the flit format.
+ */
+
+#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
+ .type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
+ .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
+ .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_HNI(_name, _def, _mask) { \
+ .attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
+ .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
+ .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_SBSX(_name, _def, _mask) { \
+ .attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
+ .event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
+ .num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }
+
+#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
+ .type = CCN_TYPE_HNF, .event = _event, }
+
+#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
+ .type = CCN_TYPE_XP, .event = _event, \
+ .num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }
+
+/*
+ * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type IDs depending
+ * on the configuration. One of them is picked to represent the whole group,
+ * as they all share the same event types.
+ */
+#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
+ .type = CCN_TYPE_RNI_3P, .event = _event, }
+
+#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
+ .type = CCN_TYPE_SBAS, .event = _event, }
+
+#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
+ .type = CCN_TYPE_CYCLES }
+
+
+static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ccn_pmu_event *event = container_of(attr,
+ struct arm_ccn_pmu_event, attr);
+ ssize_t res;
+
+ res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
+ if (event->event)
+ res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
+ event->event);
+ if (event->def)
+ res += snprintf(buf + res, PAGE_SIZE - res, ",%s",
+ event->def);
+ if (event->mask)
+ res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
+ event->mask);
+ res += snprintf(buf + res, PAGE_SIZE - res, "\n");
+
+ return res;
+}
+
+static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ struct device_attribute *dev_attr = container_of(attr,
+ struct device_attribute, attr);
+ struct arm_ccn_pmu_event *event = container_of(dev_attr,
+ struct arm_ccn_pmu_event, attr);
+
+ if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
+ return 0;
+ if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
+ CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+ CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
+ CCN_IDX_MASK_ORDER),
+ CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
+ CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
+ CCN_IDX_MASK_ORDER),
+ CCN_EVENT_HNF(cache_miss, 0x1),
+ CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
+ CCN_EVENT_HNF(cache_fill, 0x3),
+ CCN_EVENT_HNF(pocq_retry, 0x4),
+ CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
+ CCN_EVENT_HNF(sf_hit, 0x6),
+ CCN_EVENT_HNF(sf_evictions, 0x7),
+ CCN_EVENT_HNF(snoops_sent, 0x8),
+ CCN_EVENT_HNF(snoops_broadcast, 0x9),
+ CCN_EVENT_HNF(l3_eviction, 0xa),
+ CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
+ CCN_EVENT_HNF(mc_retries, 0xc),
+ CCN_EVENT_HNF(mc_reqs, 0xd),
+ CCN_EVENT_HNF(qos_hh_retry, 0xe),
+ CCN_EVENT_RNI(rdata_beats_p0, 0x1),
+ CCN_EVENT_RNI(rdata_beats_p1, 0x2),
+ CCN_EVENT_RNI(rdata_beats_p2, 0x3),
+ CCN_EVENT_RNI(rxdat_flits, 0x4),
+ CCN_EVENT_RNI(txdat_flits, 0x5),
+ CCN_EVENT_RNI(txreq_flits, 0x6),
+ CCN_EVENT_RNI(txreq_flits_retried, 0x7),
+ CCN_EVENT_RNI(rrt_full, 0x8),
+ CCN_EVENT_RNI(wrt_full, 0x9),
+ CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
+ CCN_EVENT_XP(upload_starvation, 0x1),
+ CCN_EVENT_XP(download_starvation, 0x2),
+ CCN_EVENT_XP(respin, 0x3),
+ CCN_EVENT_XP(valid_flit, 0x4),
+ CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
+ CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
+ CCN_EVENT_SBAS(rxdat_flits, 0x4),
+ CCN_EVENT_SBAS(txdat_flits, 0x5),
+ CCN_EVENT_SBAS(txreq_flits, 0x6),
+ CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
+ CCN_EVENT_SBAS(rrt_full, 0x8),
+ CCN_EVENT_SBAS(wrt_full, 0x9),
+ CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
+ CCN_EVENT_CYCLES(cycles),
+};
+
+/* Populated in arm_ccn_init() */
+static struct attribute
+ *arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];
+
+static struct attribute_group arm_ccn_pmu_events_attr_group = {
+ .name = "events",
+ .is_visible = arm_ccn_pmu_events_is_visible,
+ .attrs = arm_ccn_pmu_events_attrs,
+};
+
+
+static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
+{
+ unsigned long i;
+
+ if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
+ return NULL;
+ i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';
+
+ switch (name[1]) {
+ case 'l':
+ return &ccn->dt.cmp_mask[i].l;
+ case 'h':
+ return &ccn->dt.cmp_mask[i].h;
+ default:
+ return NULL;
+ }
+}
+
+static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
+
+ return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
+}
+
+static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
+ int err = -EINVAL;
+
+ if (mask)
+ err = kstrtoull(buf, 0, mask);
+
+ return err ? err : count;
+}
+
+#define CCN_CMP_MASK_ATTR(_name) \
+ struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
+ __ATTR(_name, S_IRUGO | S_IWUSR, \
+ arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)
+
+#define CCN_CMP_MASK_ATTR_RO(_name) \
+ struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
+ __ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)
+
+static CCN_CMP_MASK_ATTR(0l);
+static CCN_CMP_MASK_ATTR(0h);
+static CCN_CMP_MASK_ATTR(1l);
+static CCN_CMP_MASK_ATTR(1h);
+static CCN_CMP_MASK_ATTR(2l);
+static CCN_CMP_MASK_ATTR(2h);
+static CCN_CMP_MASK_ATTR(3l);
+static CCN_CMP_MASK_ATTR(3h);
+static CCN_CMP_MASK_ATTR(4l);
+static CCN_CMP_MASK_ATTR(4h);
+static CCN_CMP_MASK_ATTR(5l);
+static CCN_CMP_MASK_ATTR(5h);
+static CCN_CMP_MASK_ATTR(6l);
+static CCN_CMP_MASK_ATTR(6h);
+static CCN_CMP_MASK_ATTR(7l);
+static CCN_CMP_MASK_ATTR(7h);
+static CCN_CMP_MASK_ATTR_RO(8l);
+static CCN_CMP_MASK_ATTR_RO(8h);
+static CCN_CMP_MASK_ATTR_RO(9l);
+static CCN_CMP_MASK_ATTR_RO(9h);
+static CCN_CMP_MASK_ATTR_RO(al);
+static CCN_CMP_MASK_ATTR_RO(ah);
+static CCN_CMP_MASK_ATTR_RO(bl);
+static CCN_CMP_MASK_ATTR_RO(bh);
+
+static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
+ &arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
+ &arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
+ &arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
+ NULL
+};
+
+static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
+ .name = "cmp_mask",
+ .attrs = arm_ccn_pmu_cmp_mask_attrs,
+};
+
+
+/*
+ * The default poll period is 10ms, which is far more frequent than
+ * necessary: even in the worst case (an event every cycle) on a bus
+ * clocked at 1GHz, the smallest (32-bit) counter takes more than 4s
+ * to overflow.
+ */
+static unsigned int arm_ccn_pmu_poll_period_us = 10000;
+module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
+ S_IRUGO | S_IWUSR);
+
+static ktime_t arm_ccn_pmu_timer_period(void)
+{
+ return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
+}
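
To back up the comment above with numbers: counting an event on every cycle of a 1GHz bus, a 32-bit counter wraps only after

	2^32 cycles / 10^9 cycles/s ~= 4.29 s

so a 10ms poll period samples each counter several hundred times per wrap interval, comfortably often enough for the hrtimer-driven update to catch every overflow.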
+
+
+static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
+ &arm_ccn_pmu_events_attr_group,
+ &arm_ccn_pmu_format_attr_group,
+ &arm_ccn_pmu_cmp_mask_attr_group,
+ NULL
+};
+
+
+static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
+{
+ int bit;
+
+ do {
+ bit = find_first_zero_bit(bitmap, size);
+ if (bit >= size)
+ return -EAGAIN;
+ } while (test_and_set_bit(bit, bitmap));
+
+ return bit;
+}
+
+/* All RN-I and RN-D nodes have identical PMUs */
+static int arm_ccn_pmu_type_eq(u32 a, u32 b)
+{
+ if (a == b)
+ return 1;
+
+ switch (a) {
+ case CCN_TYPE_RNI_1P:
+ case CCN_TYPE_RNI_2P:
+ case CCN_TYPE_RNI_3P:
+ case CCN_TYPE_RND_1P:
+ case CCN_TYPE_RND_2P:
+ case CCN_TYPE_RND_3P:
+ switch (b) {
+ case CCN_TYPE_RNI_1P:
+ case CCN_TYPE_RNI_2P:
+ case CCN_TYPE_RNI_3P:
+ case CCN_TYPE_RND_1P:
+ case CCN_TYPE_RND_2P:
+ case CCN_TYPE_RND_3P:
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int arm_ccn_pmu_event_init(struct perf_event *event)
+{
+ struct arm_ccn *ccn;
+ struct hw_perf_event *hw = &event->hw;
+ u32 node_xp, type, event_id;
+ int valid, bit;
+ struct arm_ccn_component *source;
+ int i;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ ccn = pmu_to_arm_ccn(event->pmu);
+
+ if (hw->sample_period) {
+ dev_warn(ccn->dev, "Sampling not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (has_branch_stack(event) || event->attr.exclude_user ||
+ event->attr.exclude_kernel || event->attr.exclude_hv ||
+ event->attr.exclude_idle) {
+ dev_warn(ccn->dev, "Can't exclude execution levels!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (event->cpu < 0) {
+ dev_warn(ccn->dev, "Can't provide per-task data!\n");
+ return -EOPNOTSUPP;
+ }
+
+ node_xp = CCN_CONFIG_NODE(event->attr.config);
+ type = CCN_CONFIG_TYPE(event->attr.config);
+ event_id = CCN_CONFIG_EVENT(event->attr.config);
+
+ /* Validate node/xp vs topology */
+ switch (type) {
+ case CCN_TYPE_XP:
+ if (node_xp >= ccn->num_xps) {
+ dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
+ return -EINVAL;
+ }
+ break;
+ case CCN_TYPE_CYCLES:
+ break;
+ default:
+ if (node_xp >= ccn->num_nodes) {
+ dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
+ return -EINVAL;
+ }
+ if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
+ dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
+ type, node_xp);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ /* Validate event ID vs available for the type */
+ for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
+ i++) {
+ struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
+ u32 port = CCN_CONFIG_PORT(event->attr.config);
+ u32 vc = CCN_CONFIG_VC(event->attr.config);
+
+ if (!arm_ccn_pmu_type_eq(type, e->type))
+ continue;
+ if (event_id != e->event)
+ continue;
+ if (e->num_ports && port >= e->num_ports) {
+ dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
+ port, node_xp);
+ return -EINVAL;
+ }
+ if (e->num_vcs && vc >= e->num_vcs) {
+ dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
+				 vc, node_xp);
+ return -EINVAL;
+ }
+ valid = 1;
+ }
+ if (!valid) {
+ dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
+ event_id, node_xp);
+ return -EINVAL;
+ }
+
+ /* Watchpoint-based event for a node is actually set on XP */
+ if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
+ u32 port;
+
+ type = CCN_TYPE_XP;
+ port = arm_ccn_node_to_xp_port(node_xp);
+ node_xp = arm_ccn_node_to_xp(node_xp);
+
+ arm_ccn_pmu_config_set(&event->attr.config,
+ node_xp, type, port);
+ }
+
+ /* Allocate the cycle counter */
+ if (type == CCN_TYPE_CYCLES) {
+ if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
+ ccn->dt.pmu_counters_mask))
+ return -EAGAIN;
+
+ hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
+ ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;
+
+ return 0;
+ }
+
+ /* Allocate an event counter */
+ hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
+ CCN_NUM_PMU_EVENT_COUNTERS);
+ if (hw->idx < 0) {
+ dev_warn(ccn->dev, "No more counters available!\n");
+ return -EAGAIN;
+ }
+
+ if (type == CCN_TYPE_XP)
+ source = &ccn->xp[node_xp];
+ else
+ source = &ccn->node[node_xp];
+ ccn->dt.pmu_counters[hw->idx].source = source;
+
+ /* Allocate an event source or a watchpoint */
+ if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
+ bit = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
+ CCN_NUM_XP_WATCHPOINTS);
+ else
+ bit = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
+ CCN_NUM_PMU_EVENTS);
+ if (bit < 0) {
+ dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
+ node_xp);
+ clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+ return -EAGAIN;
+ }
+ hw->config_base = bit;
+
+ ccn->dt.pmu_counters[hw->idx].event = event;
+
+ return 0;
+}
+
+static void arm_ccn_pmu_event_free(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+ clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
+ } else {
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
+ CCN_CONFIG_EVENT(event->attr.config) ==
+ CCN_EVENT_WATCHPOINT)
+ clear_bit(hw->config_base, source->xp.dt_cmp_mask);
+ else
+ clear_bit(hw->config_base, source->pmu_events_mask);
+ clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
+ }
+
+ ccn->dt.pmu_counters[hw->idx].source = NULL;
+ ccn->dt.pmu_counters[hw->idx].event = NULL;
+}
+
+static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
+{
+ u64 res;
+
+ if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
+#ifdef readq
+ res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
+#else
+		/* 40-bit counter: take a snapshot, then read it in two parts */
+ writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
+ while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
+ ;
+ writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
+ res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
+ res <<= 32;
+ res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
+#endif
+ } else {
+ res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
+ }
+
+ return res;
+}
+
+static void arm_ccn_pmu_event_update(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 prev_count, new_count, mask;
+
+ do {
+ prev_count = local64_read(&hw->prev_count);
+ new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
+ } while (local64_xchg(&hw->prev_count, new_count) != prev_count);
+
+ mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;
+
+ local64_add((new_count - prev_count) & mask, &event->count);
+}
+
+static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *xp;
+ u32 val, dt_cfg;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+ xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
+ else
+ xp = &ccn->xp[arm_ccn_node_to_xp(
+ CCN_CONFIG_NODE(event->attr.config))];
+
+ if (enable)
+ dt_cfg = hw->event_base;
+ else
+ dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;
+
+ spin_lock(&ccn->dt.config_lock);
+
+ val = readl(xp->base + CCN_XP_DT_CONFIG);
+ val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
+ CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
+ val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
+ writel(val, xp->base + CCN_XP_DT_CONFIG);
+
+ spin_unlock(&ccn->dt.config_lock);
+}
+
+static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+
+ local64_set(&event->hw.prev_count,
+ arm_ccn_pmu_read_counter(ccn, hw->idx));
+ hw->state = 0;
+
+ if (!ccn->irq_used)
+ hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
+ HRTIMER_MODE_REL);
+
+ /* Set the DT bus input, engaging the counter */
+ arm_ccn_pmu_xp_dt_config(event, 1);
+}
+
+static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u64 timeout;
+
+ /* Disable counting, setting the DT bus to pass-through mode */
+ arm_ccn_pmu_xp_dt_config(event, 0);
+
+ if (!ccn->irq_used)
+ hrtimer_cancel(&ccn->dt.hrtimer);
+
+ /* Let the DT bus drain */
+ timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
+ ccn->num_xps;
+ while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
+ timeout)
+ cpu_relax();
+
+ if (flags & PERF_EF_UPDATE)
+ arm_ccn_pmu_event_update(event);
+
+ hw->state |= PERF_HES_STOPPED;
+}
+
+static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+ unsigned long wp = hw->config_base;
+ u32 val;
+ u64 cmp_l = event->attr.config1;
+ u64 cmp_h = event->attr.config2;
+ u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
+ u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;
+
+ hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);
+
+ /* Direction (RX/TX), device (port) & virtual channel */
+ val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
+ val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
+ CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
+ val |= CCN_CONFIG_DIR(event->attr.config) <<
+ CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
+ val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
+ CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
+ val |= CCN_CONFIG_PORT(event->attr.config) <<
+ CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
+ val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
+ CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
+ val |= CCN_CONFIG_VC(event->attr.config) <<
+ CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
+ writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);
+
+ /* Comparison values */
+ writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
+ writel((cmp_l >> 32) & 0xefffffff,
+ source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
+ writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
+ writel((cmp_h >> 32) & 0x0fffffff,
+ source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);
+
+ /* Mask */
+ writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
+ writel((mask_l >> 32) & 0xefffffff,
+ source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
+ writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
+ writel((mask_h >> 32) & 0x0fffffff,
+ source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
+}
+
+static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+ u32 val, id;
+
+ hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);
+
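+ /* The XP event ID encodes virtual channel, bus (port) and event number */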
+ id = (CCN_CONFIG_VC(event->attr.config) << 4) |
+ (CCN_CONFIG_PORT(event->attr.config) << 3) |
+ (CCN_CONFIG_EVENT(event->attr.config) << 0);
+
+ val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
+ val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
+ CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
+ val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
+ writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
+}
+
+static void arm_ccn_pmu_node_event_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ struct arm_ccn_component *source =
+ ccn->dt.pmu_counters[hw->idx].source;
+ u32 type = CCN_CONFIG_TYPE(event->attr.config);
+ u32 val, port;
+
+ port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
+ hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
+ hw->config_base);
+
+ /* These *_event_sel regs should be identical, but let's make sure... */
+ BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
+ BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
+ BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
+ CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
+ BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
+ CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
+ BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
+ CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
+ BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
+ CCN_RNI_PMU_EVENT_SEL__ID__MASK);
+ if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
+ !arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
+ return;
+
+ /* Set the event id for the pre-allocated counter */
+ val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
+ val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
+ CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
+ val |= CCN_CONFIG_EVENT(event->attr.config) <<
+ CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
+ writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
+}
+
+static void arm_ccn_pmu_event_config(struct perf_event *event)
+{
+ struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
+ struct hw_perf_event *hw = &event->hw;
+ u32 xp, offset, val;
+
+ /* Cycle counter requires no setup */
+ if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
+ return;
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+ xp = CCN_CONFIG_XP(event->attr.config);
+ else
+ xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));
+
+ spin_lock(&ccn->dt.config_lock);
+
+ /* Set the DT bus "distance" register */
+ offset = (hw->idx / 4) * 4;
+ val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
+ val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
+ CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
+ val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
+ writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
+
+ if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
+ if (CCN_CONFIG_EVENT(event->attr.config) ==
+ CCN_EVENT_WATCHPOINT)
+ arm_ccn_pmu_xp_watchpoint_config(event);
+ else
+ arm_ccn_pmu_xp_event_config(event);
+ } else {
+ arm_ccn_pmu_node_event_config(event);
+ }
+
+ spin_unlock(&ccn->dt.config_lock);
+}
+
+static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hw = &event->hw;
+
+ arm_ccn_pmu_event_config(event);
+
+ hw->state = PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);
+
+ return 0;
+}
+
+static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
+{
+ arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);
+
+ arm_ccn_pmu_event_free(event);
+}
+
+static void arm_ccn_pmu_event_read(struct perf_event *event)
+{
+ arm_ccn_pmu_event_update(event);
+}
+
+static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
+{
+ u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
+ int idx;
+
+ if (!pmovsr)
+ return IRQ_NONE;
+
+ writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);
+
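+ /* The loop below relies on the cycle counter index being the last one */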
+ BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);
+
+ for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
+ struct perf_event *event = dt->pmu_counters[idx].event;
+ int overflowed = pmovsr & BIT(idx);
+
+ WARN_ON_ONCE(overflowed && !event);
+
+ if (!event || !overflowed)
+ continue;
+
+ arm_ccn_pmu_event_update(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
+{
+ struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
+ hrtimer);
+ unsigned long flags;
+
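+ /* Emulate the overflow interrupt, with local interrupts disabled */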
+ local_irq_save(flags);
+ arm_ccn_pmu_overflow_handler(dt);
+ local_irq_restore(flags);
+
+ hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
+ return HRTIMER_RESTART;
+}
+
+
+static DEFINE_IDA(arm_ccn_pmu_ida);
+
+static int arm_ccn_pmu_init(struct arm_ccn *ccn)
+{
+ int i;
+ char *name;
+
+ /* Initialize DT subsystem */
+ ccn->dt.base = ccn->base + CCN_REGION_SIZE;
+ spin_lock_init(&ccn->dt.config_lock);
+ writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
+ writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
+ ccn->dt.base + CCN_DT_PMCR);
+ writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
+ for (i = 0; i < ccn->num_xps; i++) {
+ writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
+ writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
+ CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
+ (CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
+ CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
+ CCN_XP_DT_CONTROL__DT_ENABLE,
+ ccn->xp[i].base + CCN_XP_DT_CONTROL);
+ }
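+
+ /* Pre-defined comparison masks used by the watchpoint events */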
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
+ ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
+ ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);
+
+ /* Get a convenient /sys/bus/event_source/devices/ name */
+ ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
+ if (ccn->dt.id == 0) {
+ name = "ccn";
+ } else {
+ int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
+
+ name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
+ }
+
+ /* Perf driver registration */
+ ccn->dt.pmu = (struct pmu) {
+ .attr_groups = arm_ccn_pmu_attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = arm_ccn_pmu_event_init,
+ .add = arm_ccn_pmu_event_add,
+ .del = arm_ccn_pmu_event_del,
+ .start = arm_ccn_pmu_event_start,
+ .stop = arm_ccn_pmu_event_stop,
+ .read = arm_ccn_pmu_event_read,
+ };
+
+ /* No overflow interrupt? Have to use a timer instead. */
+ if (!ccn->irq_used) {
+ dev_info(ccn->dev, "No access to interrupts, using timer.\n");
+ hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
+ }
+
+ return perf_pmu_register(&ccn->dt.pmu, name, -1);
+}
+
+static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
+{
+ int i;
+
+ for (i = 0; i < ccn->num_xps; i++)
+ writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
+ writel(0, ccn->dt.base + CCN_DT_PMCR);
+ perf_pmu_unregister(&ccn->dt.pmu);
+ ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
+}
+
+
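+/*
+ * Walk all regions marked as present in the MN component list, passing
+ * each region's base address, type and node ID to the callback.
+ */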
+static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
+ int (*callback)(struct arm_ccn *ccn, int region,
+ void __iomem *base, u32 type, u32 id))
+{
+ int region;
+
+ for (region = 0; region < CCN_NUM_REGIONS; region++) {
+ u32 val, type, id;
+ void __iomem *base;
+ int err;
+
+ val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
+ 4 * (region / 32));
+ if (!(val & (1 << (region % 32))))
+ continue;
+
+ base = ccn->base + region * CCN_REGION_SIZE;
+ val = readl(base + CCN_ALL_OLY_ID);
+ type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
+ CCN_ALL_OLY_ID__OLY_ID__MASK;
+ id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
+ CCN_ALL_OLY_ID__NODE_ID__MASK;
+
+ err = callback(ccn, region, base, type, id);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
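+/* Work out the number of XPs and nodes from the highest IDs discovered */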
+static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
+ void __iomem *base, u32 type, u32 id)
+{
+ if (type == CCN_TYPE_XP && id >= ccn->num_xps)
+ ccn->num_xps = id + 1;
+ else if (id >= ccn->num_nodes)
+ ccn->num_nodes = id + 1;
+
+ return 0;
+}
+
+static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
+ void __iomem *base, u32 type, u32 id)
+{
+ struct arm_ccn_component *component;
+
+ dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);
+
+ switch (type) {
+ case CCN_TYPE_MN:
+ case CCN_TYPE_DT:
+ return 0;
+ case CCN_TYPE_XP:
+ component = &ccn->xp[id];
+ break;
+ case CCN_TYPE_SBSX:
+ ccn->sbsx_present = 1;
+ component = &ccn->node[id];
+ break;
+ case CCN_TYPE_SBAS:
+ ccn->sbas_present = 1;
+ /* Fall-through */
+ default:
+ component = &ccn->node[id];
+ break;
+ }
+
+ component->base = base;
+ component->type = type;
+
+ return 0;
+}
+
+
+static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
+ const u32 *err_sig_val)
+{
+ /* This should really be handled by firmware... */
+ dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
+ err_sig_val[5], err_sig_val[4], err_sig_val[3],
+ err_sig_val[2], err_sig_val[1], err_sig_val[0]);
+ dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
+ writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+
+static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
+{
+ irqreturn_t res = IRQ_NONE;
+ struct arm_ccn *ccn = dev_id;
+ u32 err_sig_val[6];
+ u32 err_or;
+ int i;
+
+ /* PMU overflow is a special case */
+ err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
+ if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
+ err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
+ res = arm_ccn_pmu_overflow_handler(&ccn->dt);
+ }
+
+ /* Have to read all err_sig_vals to clear them */
+ for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
+ err_sig_val[i] = readl(ccn->base +
+ CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
+ err_or |= err_sig_val[i];
+ }
+ if (err_or)
+ res |= arm_ccn_error_handler(ccn, err_sig_val);
+
+ if (res != IRQ_NONE)
+ writel(CCN_MN_ERRINT_STATUS__INTREQ__DESSERT,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+
+ return res;
+}
+
+
+static int arm_ccn_probe(struct platform_device *pdev)
+{
+ struct arm_ccn *ccn;
+ struct resource *res;
+ int err;
+
+ ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
+ if (!ccn)
+ return -ENOMEM;
+ ccn->dev = &pdev->dev;
+ platform_set_drvdata(pdev, ccn);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(ccn->dev, res->start,
+ resource_size(res), pdev->name))
+ return -EBUSY;
+
+ ccn->base = devm_ioremap(ccn->dev, res->start,
+ resource_size(res));
+ if (!ccn->base)
+ return -EFAULT;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res)
+ return -EINVAL;
+
+ /* Check if we can use the interrupt */
+ writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+ if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
+ CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
+ /* Can set 'disable' bits, so can acknowledge interrupts */
+ writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
+ ccn->base + CCN_MN_ERRINT_STATUS);
+ err = devm_request_irq(ccn->dev, res->start,
+ arm_ccn_irq_handler, 0, dev_name(ccn->dev),
+ ccn);
+ if (err)
+ return err;
+
+ ccn->irq_used = 1;
+ }
+
+
+ /* Build topology */
+
+ err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
+ if (err)
+ return err;
+
+ ccn->node = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_nodes,
+ GFP_KERNEL);
+ ccn->xp = devm_kzalloc(ccn->dev, sizeof(*ccn->xp) * ccn->num_xps,
+ GFP_KERNEL);
+ if (!ccn->node || !ccn->xp)
+ return -ENOMEM;
+
+ err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
+ if (err)
+ return err;
+
+ return arm_ccn_pmu_init(ccn);
+}
+
+static int arm_ccn_remove(struct platform_device *pdev)
+{
+ struct arm_ccn *ccn = platform_get_drvdata(pdev);
+
+ arm_ccn_pmu_cleanup(ccn);
+
+ return 0;
+}
+
+static const struct of_device_id arm_ccn_match[] = {
+ { .compatible = "arm,ccn-504", },
+ {},
+};
+
+static struct platform_driver arm_ccn_driver = {
+ .driver = {
+ .name = "arm-ccn",
+ .of_match_table = arm_ccn_match,
+ },
+ .probe = arm_ccn_probe,
+ .remove = arm_ccn_remove,
+};
+
+static int __init arm_ccn_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
+ arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
+
+ return platform_driver_register(&arm_ccn_driver);
+}
+
+static void __exit arm_ccn_exit(void)
+{
+ platform_driver_unregister(&arm_ccn_driver);
+}
+
+module_init(arm_ccn_init);
+module_exit(arm_ccn_exit);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index f8ee13c7bf7b..75c9681f8021 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -162,7 +162,9 @@ static int __init weim_parse_dt(struct platform_device *pdev,
}
}
- ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ ret = of_platform_populate(pdev->dev.of_node,
+ of_default_bus_match_table,
+ NULL, &pdev->dev);
if (ret)
dev_err(&pdev->dev, "%s fail to create devices.\n",
pdev->dev.of_node->full_name);
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index b29703324e94..09f17eb73486 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -710,19 +710,6 @@ static int agp_open(struct inode *inode, struct file *file)
return 0;
}
-
-static ssize_t agp_read(struct file *file, char __user *buf,
- size_t count, loff_t * ppos)
-{
- return -EINVAL;
-}
-
-static ssize_t agp_write(struct file *file, const char __user *buf,
- size_t count, loff_t * ppos)
-{
- return -EINVAL;
-}
-
static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_info userinfo;
@@ -1047,8 +1034,6 @@ static const struct file_operations agp_fops =
{
.owner = THIS_MODULE,
.llseek = no_llseek,
- .read = agp_read,
- .write = agp_write,
.unlocked_ioctl = agp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_agp_ioctl,
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 6e02ec103cc7..aa30a25c8d49 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -81,12 +81,6 @@ static void add_early_randomness(struct hwrng *rng)
unsigned char bytes[16];
int bytes_read;
- /*
- * Currently only virtio-rng cannot return data during device
- * probe, and that's handled in virtio-rng.c itself. If there
- * are more such devices, this call to rng_get_data can be
- * made conditional here instead of doing it per-device.
- */
bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
if (bytes_read > 0)
add_device_randomness(bytes, bytes_read);
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index e9b15bc18b4d..0027137daa56 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -28,17 +28,16 @@
static DEFINE_IDA(rng_index_ida);
struct virtrng_info {
- struct virtio_device *vdev;
struct hwrng hwrng;
struct virtqueue *vq;
- unsigned int data_avail;
struct completion have_data;
- bool busy;
char name[25];
+ unsigned int data_avail;
int index;
+ bool busy;
+ bool hwrng_register_done;
};
-static bool probe_done;
static void random_recv_done(struct virtqueue *vq)
{
@@ -69,13 +68,6 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
int ret;
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
- /*
- * Don't ask host for data till we're setup. This call can
- * happen during hwrng_register(), after commit d9e7972619.
- */
- if (unlikely(!probe_done))
- return 0;
-
if (!vi->busy) {
vi->busy = true;
init_completion(&vi->have_data);
@@ -137,25 +129,17 @@ static int probe_common(struct virtio_device *vdev)
return err;
}
- err = hwrng_register(&vi->hwrng);
- if (err) {
- vdev->config->del_vqs(vdev);
- vi->vq = NULL;
- kfree(vi);
- ida_simple_remove(&rng_index_ida, index);
- return err;
- }
-
- probe_done = true;
return 0;
}
static void remove_common(struct virtio_device *vdev)
{
struct virtrng_info *vi = vdev->priv;
+
vdev->config->reset(vdev);
vi->busy = false;
- hwrng_unregister(&vi->hwrng);
+ if (vi->hwrng_register_done)
+ hwrng_unregister(&vi->hwrng);
vdev->config->del_vqs(vdev);
ida_simple_remove(&rng_index_ida, vi->index);
kfree(vi);
@@ -171,6 +155,16 @@ static void virtrng_remove(struct virtio_device *vdev)
remove_common(vdev);
}
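+/*
+ * hwrng registration is deferred to the scan() callback, which the virtio
+ * core invokes only after probe has completed and the device is ready.
+ */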
+static void virtrng_scan(struct virtio_device *vdev)
+{
+ struct virtrng_info *vi = vdev->priv;
+ int err;
+
+ err = hwrng_register(&vi->hwrng);
+ if (!err)
+ vi->hwrng_register_done = true;
+}
+
#ifdef CONFIG_PM_SLEEP
static int virtrng_freeze(struct virtio_device *vdev)
{
@@ -195,6 +189,7 @@ static struct virtio_driver virtio_rng_driver = {
.id_table = id_table,
.probe = virtrng_probe,
.remove = virtrng_remove,
+ .scan = virtrng_scan,
#ifdef CONFIG_PM_SLEEP
.freeze = virtrng_freeze,
.restore = virtrng_restore,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 60aafb8a1f2e..b585b4789822 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2262,8 +2262,7 @@ static int __init init(void)
unregister:
unregister_virtio_driver(&virtio_console);
free:
- if (pdrvdata.debugfs_dir)
- debugfs_remove_recursive(pdrvdata.debugfs_dir);
+ debugfs_remove_recursive(pdrvdata.debugfs_dir);
class_destroy(pdrvdata.class);
return err;
}
@@ -2276,8 +2275,7 @@ static void __exit fini(void)
unregister_virtio_driver(&virtio_rproc_serial);
class_destroy(pdrvdata.class);
- if (pdrvdata.debugfs_dir)
- debugfs_remove_recursive(pdrvdata.debugfs_dir);
+ debugfs_remove_recursive(pdrvdata.debugfs_dir);
}
module_init(init);
module_exit(fini);
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 8ebf757d29e2..3821a88077ea 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -16,10 +16,19 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
+#include <linux/mvebu-pmsu.h>
+#include <asm/smp_plat.h>
-#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
-#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
-#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
+#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
+#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL 0xff
+#define SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT 8
+#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET 0x8
+#define SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
+#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
+#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
+
+#define PMU_DFS_RATIO_SHIFT 16
+#define PMU_DFS_RATIO_MASK 0x3F
#define MAX_CPU 4
struct cpu_clk {
@@ -28,6 +37,7 @@ struct cpu_clk {
const char *clk_name;
const char *parent_name;
void __iomem *reg_base;
+ void __iomem *pmu_dfs;
};
static struct clk **clks;
@@ -62,8 +72,9 @@ static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
return *parent_rate / div;
}
-static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long parent_rate)
+static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
{
struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
u32 reg, div;
@@ -95,6 +106,58 @@ static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
return 0;
}
+static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 reg;
+ unsigned long fabric_div, target_div, cur_rate;
+ struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
+
+ /*
+ * The PMU DFS registers are not mapped; the Device Tree does
+ * not describe them, so the frequency cannot be changed
+ * dynamically.
+ */
+ if (!cpuclk->pmu_dfs)
+ return -ENODEV;
+
+ cur_rate = __clk_get_rate(hwclk->clk);
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
+ fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
+ SYS_CTRL_CLK_DIVIDER_MASK;
+
+ /* Frequency is going up */
+ if (rate == 2 * cur_rate)
+ target_div = fabric_div / 2;
+ /* Frequency is going down */
+ else
+ target_div = fabric_div;
+
+ if (target_div == 0)
+ target_div = 1;
+
+ reg = readl(cpuclk->pmu_dfs);
+ reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
+ reg |= (target_div << PMU_DFS_RATIO_SHIFT);
+ writel(reg, cpuclk->pmu_dfs);
+
+ reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+ reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
+ SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
+ writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
+
+ return mvebu_pmsu_dfs_request(cpuclk->cpu);
+}
+
+static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ if (__clk_is_enabled(hwclk->clk))
+ return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
+ else
+ return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
+}
+
static const struct clk_ops cpu_ops = {
.recalc_rate = clk_cpu_recalc_rate,
.round_rate = clk_cpu_round_rate,
@@ -105,6 +168,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
{
struct cpu_clk *cpuclk;
void __iomem *clock_complex_base = of_iomap(node, 0);
+ void __iomem *pmu_dfs_base = of_iomap(node, 1);
int ncpus = 0;
struct device_node *dn;
@@ -114,6 +178,10 @@ static void __init of_cpu_clk_setup(struct device_node *node)
return;
}
+ if (pmu_dfs_base == NULL)
+ pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
+ __func__);
+
for_each_node_by_type(dn, "cpu")
ncpus++;
@@ -146,6 +214,8 @@ static void __init of_cpu_clk_setup(struct device_node *node)
cpuclk[cpu].clk_name = clk_name;
cpuclk[cpu].cpu = cpu;
cpuclk[cpu].reg_base = clock_complex_base;
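+ /* Each CPU has its own 32-bit PMU DFS control register */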
+ if (pmu_dfs_base)
+ cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
cpuclk[cpu].hw.init = &init;
init.name = cpuclk[cpu].clk_name;
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 2949a556af8f..6fb4bc602e8a 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
obj-$(CONFIG_S3C2443_COMMON_CLK)+= clk-s3c2443.o
obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o
+obj-$(CONFIG_ARCH_S5PV210) += clk-s5pv210.o clk-s5pv210-audss.o
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
new file mode 100644
index 000000000000..a8053b4aca56
--- /dev/null
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2014 Tomasz Figa <t.figa@samsung.com>
+ *
+ * Based on Exynos Audio Subsystem Clock Controller driver:
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Author: Padmavathi Venna <padma.v@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for Audio Subsystem Clock Controller of S5PV210-compatible SoCs.
+*/
+
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/s5pv210-audss.h>
+
+static DEFINE_SPINLOCK(lock);
+static struct clk **clk_table;
+static void __iomem *reg_base;
+static struct clk_onecell_data clk_data;
+
+#define ASS_CLK_SRC 0x0
+#define ASS_CLK_DIV 0x4
+#define ASS_CLK_GATE 0x8
+
+#ifdef CONFIG_PM_SLEEP
+static unsigned long reg_save[][2] = {
+ {ASS_CLK_SRC, 0},
+ {ASS_CLK_DIV, 0},
+ {ASS_CLK_GATE, 0},
+};
+
+static int s5pv210_audss_clk_suspend(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_save); i++)
+ reg_save[i][1] = readl(reg_base + reg_save[i][0]);
+
+ return 0;
+}
+
+static void s5pv210_audss_clk_resume(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_save); i++)
+ writel(reg_save[i][1], reg_base + reg_save[i][0]);
+}
+
+static struct syscore_ops s5pv210_audss_clk_syscore_ops = {
+ .suspend = s5pv210_audss_clk_suspend,
+ .resume = s5pv210_audss_clk_resume,
+};
+#endif /* CONFIG_PM_SLEEP */
+
+/* register s5pv210_audss clocks */
+static int s5pv210_audss_clk_probe(struct platform_device *pdev)
+{
+ int i, ret = 0;
+ struct resource *res;
+ const char *mout_audss_p[2];
+ const char *mout_i2s_p[3];
+ const char *hclk_p;
+ struct clk *hclk, *pll_ref, *pll_in, *cdclk, *sclk_audio;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(reg_base)) {
+ dev_err(&pdev->dev, "failed to map audss registers\n");
+ return PTR_ERR(reg_base);
+ }
+
+ clk_table = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * AUDSS_MAX_CLKS,
+ GFP_KERNEL);
+ if (!clk_table)
+ return -ENOMEM;
+
+ clk_data.clks = clk_table;
+ clk_data.clk_num = AUDSS_MAX_CLKS;
+
+ hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(hclk)) {
+ dev_err(&pdev->dev, "failed to get hclk clock\n");
+ return PTR_ERR(hclk);
+ }
+
+ pll_in = devm_clk_get(&pdev->dev, "fout_epll");
+ if (IS_ERR(pll_in)) {
+ dev_err(&pdev->dev, "failed to get fout_epll clock\n");
+ return PTR_ERR(pll_in);
+ }
+
+ sclk_audio = devm_clk_get(&pdev->dev, "sclk_audio0");
+ if (IS_ERR(sclk_audio)) {
+ dev_err(&pdev->dev, "failed to get sclk_audio0 clock\n");
+ return PTR_ERR(sclk_audio);
+ }
+
+ /* iiscdclk0 is an optional external I2S codec clock */
+ cdclk = devm_clk_get(&pdev->dev, "iiscdclk0");
+ pll_ref = devm_clk_get(&pdev->dev, "xxti");
+
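+ /* Fall back to default parent names if the optional clocks are missing */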
+ if (!IS_ERR(pll_ref))
+ mout_audss_p[0] = __clk_get_name(pll_ref);
+ else
+ mout_audss_p[0] = "xxti";
+ mout_audss_p[1] = __clk_get_name(pll_in);
+ clk_table[CLK_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
+ mout_audss_p, ARRAY_SIZE(mout_audss_p),
+ CLK_SET_RATE_NO_REPARENT,
+ reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);
+
+ mout_i2s_p[0] = "mout_audss";
+ if (!IS_ERR(cdclk))
+ mout_i2s_p[1] = __clk_get_name(cdclk);
+ else
+ mout_i2s_p[1] = "iiscdclk0";
+ mout_i2s_p[2] = __clk_get_name(sclk_audio);
+ clk_table[CLK_MOUT_I2S_A] = clk_register_mux(NULL, "mout_i2s_audss",
+ mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
+ CLK_SET_RATE_NO_REPARENT,
+ reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);
+
+ clk_table[CLK_DOUT_AUD_BUS] = clk_register_divider(NULL,
+ "dout_aud_bus", "mout_audss", 0,
+ reg_base + ASS_CLK_DIV, 0, 4, 0, &lock);
+ clk_table[CLK_DOUT_I2S_A] = clk_register_divider(NULL, "dout_i2s_audss",
+ "mout_i2s_audss", 0, reg_base + ASS_CLK_DIV,
+ 4, 4, 0, &lock);
+
+ clk_table[CLK_I2S] = clk_register_gate(NULL, "i2s_audss",
+ "dout_i2s_audss", CLK_SET_RATE_PARENT,
+ reg_base + ASS_CLK_GATE, 6, 0, &lock);
+
+ hclk_p = __clk_get_name(hclk);
+
+ clk_table[CLK_HCLK_I2S] = clk_register_gate(NULL, "hclk_i2s_audss",
+ hclk_p, CLK_IGNORE_UNUSED,
+ reg_base + ASS_CLK_GATE, 5, 0, &lock);
+ clk_table[CLK_HCLK_UART] = clk_register_gate(NULL, "hclk_uart_audss",
+ hclk_p, CLK_IGNORE_UNUSED,
+ reg_base + ASS_CLK_GATE, 4, 0, &lock);
+ clk_table[CLK_HCLK_HWA] = clk_register_gate(NULL, "hclk_hwa_audss",
+ hclk_p, CLK_IGNORE_UNUSED,
+ reg_base + ASS_CLK_GATE, 3, 0, &lock);
+ clk_table[CLK_HCLK_DMA] = clk_register_gate(NULL, "hclk_dma_audss",
+ hclk_p, CLK_IGNORE_UNUSED,
+ reg_base + ASS_CLK_GATE, 2, 0, &lock);
+ clk_table[CLK_HCLK_BUF] = clk_register_gate(NULL, "hclk_buf_audss",
+ hclk_p, CLK_IGNORE_UNUSED,
+ reg_base + ASS_CLK_GATE, 1, 0, &lock);
+ clk_table[CLK_HCLK_RP] = clk_register_gate(NULL, "hclk_rp_audss",
+ hclk_p, CLK_IGNORE_UNUSED,
+ reg_base + ASS_CLK_GATE, 0, 0, &lock);
+
+ for (i = 0; i < clk_data.clk_num; i++) {
+ if (IS_ERR(clk_table[i])) {
+ dev_err(&pdev->dev, "failed to register clock %d\n", i);
+ ret = PTR_ERR(clk_table[i]);
+ goto unregister;
+ }
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ &clk_data);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add clock provider\n");
+ goto unregister;
+ }
+
+#ifdef CONFIG_PM_SLEEP
+ register_syscore_ops(&s5pv210_audss_clk_syscore_ops);
+#endif
+
+ return 0;
+
+unregister:
+ for (i = 0; i < clk_data.clk_num; i++) {
+ if (!IS_ERR(clk_table[i]))
+ clk_unregister(clk_table[i]);
+ }
+
+ return ret;
+}
+
+static int s5pv210_audss_clk_remove(struct platform_device *pdev)
+{
+ int i;
+
+ of_clk_del_provider(pdev->dev.of_node);
+
+ for (i = 0; i < clk_data.clk_num; i++) {
+ if (!IS_ERR(clk_table[i]))
+ clk_unregister(clk_table[i]);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id s5pv210_audss_clk_of_match[] = {
+ { .compatible = "samsung,s5pv210-audss-clock", },
+ {},
+};
+
+static struct platform_driver s5pv210_audss_clk_driver = {
+ .driver = {
+ .name = "s5pv210-audss-clk",
+ .owner = THIS_MODULE,
+ .of_match_table = s5pv210_audss_clk_of_match,
+ },
+ .probe = s5pv210_audss_clk_probe,
+ .remove = s5pv210_audss_clk_remove,
+};
+
+static int __init s5pv210_audss_clk_init(void)
+{
+ return platform_driver_register(&s5pv210_audss_clk_driver);
+}
+core_initcall(s5pv210_audss_clk_init);
+
+static void __exit s5pv210_audss_clk_exit(void)
+{
+ platform_driver_unregister(&s5pv210_audss_clk_driver);
+}
+module_exit(s5pv210_audss_clk_exit);
+
+MODULE_AUTHOR("Tomasz Figa <t.figa@samsung.com>");
+MODULE_DESCRIPTION("S5PV210 Audio Subsystem Clock Controller");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:s5pv210-audss-clk");
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
new file mode 100644
index 000000000000..d270a2084644
--- /dev/null
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -0,0 +1,856 @@
+/*
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Author: Mateusz Krawczuk <m.krawczuk@partner.samsung.com>
+ *
+ * Based on clock drivers for S3C64xx and Exynos4 SoCs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for all S5PC110/S5PV210 SoCs.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+#include <dt-bindings/clock/s5pv210.h>
+
+/* S5PC110/S5PV210 clock controller register offsets */
+#define APLL_LOCK 0x0000
+#define MPLL_LOCK 0x0008
+#define EPLL_LOCK 0x0010
+#define VPLL_LOCK 0x0020
+#define APLL_CON0 0x0100
+#define APLL_CON1 0x0104
+#define MPLL_CON 0x0108
+#define EPLL_CON0 0x0110
+#define EPLL_CON1 0x0114
+#define VPLL_CON 0x0120
+#define CLK_SRC0 0x0200
+#define CLK_SRC1 0x0204
+#define CLK_SRC2 0x0208
+#define CLK_SRC3 0x020c
+#define CLK_SRC4 0x0210
+#define CLK_SRC5 0x0214
+#define CLK_SRC6 0x0218
+#define CLK_SRC_MASK0 0x0280
+#define CLK_SRC_MASK1 0x0284
+#define CLK_DIV0 0x0300
+#define CLK_DIV1 0x0304
+#define CLK_DIV2 0x0308
+#define CLK_DIV3 0x030c
+#define CLK_DIV4 0x0310
+#define CLK_DIV5 0x0314
+#define CLK_DIV6 0x0318
+#define CLK_DIV7 0x031c
+#define CLK_GATE_MAIN0 0x0400
+#define CLK_GATE_MAIN1 0x0404
+#define CLK_GATE_MAIN2 0x0408
+#define CLK_GATE_PERI0 0x0420
+#define CLK_GATE_PERI1 0x0424
+#define CLK_GATE_SCLK0 0x0440
+#define CLK_GATE_SCLK1 0x0444
+#define CLK_GATE_IP0 0x0460
+#define CLK_GATE_IP1 0x0464
+#define CLK_GATE_IP2 0x0468
+#define CLK_GATE_IP3 0x046c
+#define CLK_GATE_IP4 0x0470
+#define CLK_GATE_BLOCK 0x0480
+#define CLK_GATE_IP5 0x0484
+#define CLK_OUT 0x0500
+#define MISC 0xe000
+#define OM_STAT 0xe100
+
+/* IDs of PLLs available on S5PV210/S5P6442 SoCs */
+enum {
+ apll,
+ mpll,
+ epll,
+ vpll,
+};
+
+/* IDs of external clocks (used for legacy boards) */
+enum {
+ xxti,
+ xusbxti,
+};
+
+static void __iomem *reg_base;
+
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *s5pv210_clk_dump;
+
+/* List of registers that need to be preserved across suspend/resume. */
+static unsigned long s5pv210_clk_regs[] __initdata = {
+ CLK_SRC0,
+ CLK_SRC1,
+ CLK_SRC2,
+ CLK_SRC3,
+ CLK_SRC4,
+ CLK_SRC5,
+ CLK_SRC6,
+ CLK_SRC_MASK0,
+ CLK_SRC_MASK1,
+ CLK_DIV0,
+ CLK_DIV1,
+ CLK_DIV2,
+ CLK_DIV3,
+ CLK_DIV4,
+ CLK_DIV5,
+ CLK_DIV6,
+ CLK_DIV7,
+ CLK_GATE_MAIN0,
+ CLK_GATE_MAIN1,
+ CLK_GATE_MAIN2,
+ CLK_GATE_PERI0,
+ CLK_GATE_PERI1,
+ CLK_GATE_SCLK0,
+ CLK_GATE_SCLK1,
+ CLK_GATE_IP0,
+ CLK_GATE_IP1,
+ CLK_GATE_IP2,
+ CLK_GATE_IP3,
+ CLK_GATE_IP4,
+ CLK_GATE_IP5,
+ CLK_GATE_BLOCK,
+ APLL_LOCK,
+ MPLL_LOCK,
+ EPLL_LOCK,
+ VPLL_LOCK,
+ APLL_CON0,
+ APLL_CON1,
+ MPLL_CON,
+ EPLL_CON0,
+ EPLL_CON1,
+ VPLL_CON,
+ CLK_OUT,
+};
+
+static int s5pv210_clk_suspend(void)
+{
+ samsung_clk_save(reg_base, s5pv210_clk_dump,
+ ARRAY_SIZE(s5pv210_clk_regs));
+ return 0;
+}
+
+static void s5pv210_clk_resume(void)
+{
+ samsung_clk_restore(reg_base, s5pv210_clk_dump,
+ ARRAY_SIZE(s5pv210_clk_regs));
+}
+
+static struct syscore_ops s5pv210_clk_syscore_ops = {
+ .suspend = s5pv210_clk_suspend,
+ .resume = s5pv210_clk_resume,
+};
+
+static void s5pv210_clk_sleep_init(void)
+{
+ s5pv210_clk_dump =
+ samsung_clk_alloc_reg_dump(s5pv210_clk_regs,
+ ARRAY_SIZE(s5pv210_clk_regs));
+ if (!s5pv210_clk_dump) {
+ pr_warn("%s: Failed to allocate sleep save data\n", __func__);
+ return;
+ }
+
+ register_syscore_ops(&s5pv210_clk_syscore_ops);
+}
+#else
+static inline void s5pv210_clk_sleep_init(void) { }
+#endif
+
+/* Mux parent lists. */
+static const char *fin_pll_p[] __initconst = {
+ "xxti",
+ "xusbxti"
+};
+
+static const char *mout_apll_p[] __initconst = {
+ "fin_pll",
+ "fout_apll"
+};
+
+static const char *mout_mpll_p[] __initconst = {
+ "fin_pll",
+ "fout_mpll"
+};
+
+static const char *mout_epll_p[] __initconst = {
+ "fin_pll",
+ "fout_epll"
+};
+
+static const char *mout_vpllsrc_p[] __initconst = {
+ "fin_pll",
+ "sclk_hdmi27m"
+};
+
+static const char *mout_vpll_p[] __initconst = {
+ "mout_vpllsrc",
+ "fout_vpll"
+};
+
+static const char *mout_group1_p[] __initconst = {
+ "dout_a2m",
+ "mout_mpll",
+ "mout_epll",
+ "mout_vpll"
+};
+
+static const char *mout_group2_p[] __initconst = {
+ "xxti",
+ "xusbxti",
+ "sclk_hdmi27m",
+ "sclk_usbphy0",
+ "sclk_usbphy1",
+ "sclk_hdmiphy",
+ "mout_mpll",
+ "mout_epll",
+ "mout_vpll",
+};
+
+static const char *mout_audio0_p[] __initconst = {
+ "xxti",
+ "pcmcdclk0",
+ "sclk_hdmi27m",
+ "sclk_usbphy0",
+ "sclk_usbphy1",
+ "sclk_hdmiphy",
+ "mout_mpll",
+ "mout_epll",
+ "mout_vpll",
+};
+
+static const char *mout_audio1_p[] __initconst = {
+ "i2scdclk1",
+ "pcmcdclk1",
+ "sclk_hdmi27m",
+ "sclk_usbphy0",
+ "sclk_usbphy1",
+ "sclk_hdmiphy",
+ "mout_mpll",
+ "mout_epll",
+ "mout_vpll",
+};
+
+static const char *mout_audio2_p[] __initconst = {
+ "i2scdclk2",
+ "pcmcdclk2",
+ "sclk_hdmi27m",
+ "sclk_usbphy0",
+ "sclk_usbphy1",
+ "sclk_hdmiphy",
+ "mout_mpll",
+ "mout_epll",
+ "mout_vpll",
+};
+
+static const char *mout_spdif_p[] __initconst = {
+ "dout_audio0",
+ "dout_audio1",
+ "dout_audio3",
+};
+
+static const char *mout_group3_p[] __initconst = {
+ "mout_apll",
+ "mout_mpll"
+};
+
+static const char *mout_group4_p[] __initconst = {
+ "mout_mpll",
+ "dout_a2m"
+};
+
+static const char *mout_flash_p[] __initconst = {
+ "dout_hclkd",
+ "dout_hclkp"
+};
+
+static const char *mout_dac_p[] __initconst = {
+ "mout_vpll",
+ "sclk_hdmiphy"
+};
+
+static const char *mout_hdmi_p[] __initconst = {
+ "sclk_hdmiphy",
+ "dout_tblk"
+};
+
+static const char *mout_mixer_p[] __initconst = {
+ "mout_dac",
+ "mout_hdmi"
+};
+
+static const char *mout_vpll_6442_p[] __initconst = {
+ "fin_pll",
+ "fout_vpll"
+};
+
+static const char *mout_mixer_6442_p[] __initconst = {
+ "mout_vpll",
+ "dout_mixer"
+};
+
+static const char *mout_d0sync_6442_p[] __initconst = {
+ "mout_dsys",
+ "div_apll"
+};
+
+static const char *mout_d1sync_6442_p[] __initconst = {
+ "mout_psys",
+ "div_apll"
+};
+
+static const char *mout_group2_6442_p[] __initconst = {
+ "fin_pll",
+ "none",
+ "none",
+ "sclk_usbphy0",
+ "none",
+ "none",
+ "mout_mpll",
+ "mout_epll",
+ "mout_vpll",
+};
+
+static const char *mout_audio0_6442_p[] __initconst = {
+ "fin_pll",
+ "pcmcdclk0",
+ "none",
+ "sclk_usbphy0",
+ "none",
+ "none",
+ "mout_mpll",
+ "mout_epll",
+ "mout_vpll",
+};
+
+static const char *mout_audio1_6442_p[] __initconst = {
+ "i2scdclk1",
+ "pcmcdclk1",
+ "none",
+ "sclk_usbphy0",
+ "none",
+ "none",
+ "mout_mpll",
+ "mout_epll",
+ "mout_vpll",
+ "fin_pll",
+};
+
+static const char *mout_clksel_p[] __initconst = {
+ "fout_apll_clkout",
+ "fout_mpll_clkout",
+ "fout_epll",
+ "fout_vpll",
+ "sclk_usbphy0",
+ "sclk_usbphy1",
+ "sclk_hdmiphy",
+ "rtc",
+ "rtc_tick",
+ "dout_hclkm",
+ "dout_pclkm",
+ "dout_hclkd",
+ "dout_pclkd",
+ "dout_hclkp",
+ "dout_pclkp",
+ "dout_apll_clkout",
+ "dout_hpm",
+ "xxti",
+ "xusbxti",
+ "div_dclk"
+};
+
+static const char *mout_clksel_6442_p[] __initconst = {
+ "fout_apll_clkout",
+ "fout_mpll_clkout",
+ "fout_epll",
+ "fout_vpll",
+ "sclk_usbphy0",
+ "none",
+ "none",
+ "rtc",
+ "rtc_tick",
+ "none",
+ "none",
+ "dout_hclkd",
+ "dout_pclkd",
+ "dout_hclkp",
+ "dout_pclkp",
+ "dout_apll_clkout",
+ "none",
+ "fin_pll",
+ "none",
+ "div_dclk"
+};
+
+static const char *mout_clkout_p[] __initconst = {
+ "dout_clkout",
+ "none",
+ "xxti",
+ "xusbxti"
+};
+
+/* Common fixed factor clocks. */
+static struct samsung_fixed_factor_clock ffactor_clks[] __initdata = {
+ FFACTOR(FOUT_APLL_CLKOUT, "fout_apll_clkout", "fout_apll", 1, 4, 0),
+ FFACTOR(FOUT_MPLL_CLKOUT, "fout_mpll_clkout", "fout_mpll", 1, 2, 0),
+ FFACTOR(DOUT_APLL_CLKOUT, "dout_apll_clkout", "dout_apll", 1, 4, 0),
+};
+
+/* PLL input mux (fin_pll), which needs to be registered before PLLs. */
+static struct samsung_mux_clock early_mux_clks[] __initdata = {
+ MUX_F(FIN_PLL, "fin_pll", fin_pll_p, OM_STAT, 0, 1,
+ CLK_MUX_READ_ONLY, 0),
+};
+
+/* Common clock muxes. */
+static struct samsung_mux_clock mux_clks[] __initdata = {
+ MUX(MOUT_FLASH, "mout_flash", mout_flash_p, CLK_SRC0, 28, 1),
+ MUX(MOUT_PSYS, "mout_psys", mout_group4_p, CLK_SRC0, 24, 1),
+ MUX(MOUT_DSYS, "mout_dsys", mout_group4_p, CLK_SRC0, 20, 1),
+ MUX(MOUT_MSYS, "mout_msys", mout_group3_p, CLK_SRC0, 16, 1),
+ MUX(MOUT_EPLL, "mout_epll", mout_epll_p, CLK_SRC0, 8, 1),
+ MUX(MOUT_MPLL, "mout_mpll", mout_mpll_p, CLK_SRC0, 4, 1),
+ MUX(MOUT_APLL, "mout_apll", mout_apll_p, CLK_SRC0, 0, 1),
+
+ MUX(MOUT_CLKOUT, "mout_clkout", mout_clkout_p, MISC, 8, 2),
+};
+
+/* S5PV210-specific clock muxes. */
+static struct samsung_mux_clock s5pv210_mux_clks[] __initdata = {
+ MUX(MOUT_VPLL, "mout_vpll", mout_vpll_p, CLK_SRC0, 12, 1),
+
+ MUX(MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, CLK_SRC1, 28, 1),
+ MUX(MOUT_CSIS, "mout_csis", mout_group2_p, CLK_SRC1, 24, 4),
+ MUX(MOUT_FIMD, "mout_fimd", mout_group2_p, CLK_SRC1, 20, 4),
+ MUX(MOUT_CAM1, "mout_cam1", mout_group2_p, CLK_SRC1, 16, 4),
+ MUX(MOUT_CAM0, "mout_cam0", mout_group2_p, CLK_SRC1, 12, 4),
+ MUX(MOUT_DAC, "mout_dac", mout_dac_p, CLK_SRC1, 8, 1),
+ MUX(MOUT_MIXER, "mout_mixer", mout_mixer_p, CLK_SRC1, 4, 1),
+ MUX(MOUT_HDMI, "mout_hdmi", mout_hdmi_p, CLK_SRC1, 0, 1),
+
+ MUX(MOUT_G2D, "mout_g2d", mout_group1_p, CLK_SRC2, 8, 2),
+ MUX(MOUT_MFC, "mout_mfc", mout_group1_p, CLK_SRC2, 4, 2),
+ MUX(MOUT_G3D, "mout_g3d", mout_group1_p, CLK_SRC2, 0, 2),
+
+ MUX(MOUT_FIMC2, "mout_fimc2", mout_group2_p, CLK_SRC3, 20, 4),
+ MUX(MOUT_FIMC1, "mout_fimc1", mout_group2_p, CLK_SRC3, 16, 4),
+ MUX(MOUT_FIMC0, "mout_fimc0", mout_group2_p, CLK_SRC3, 12, 4),
+
+ MUX(MOUT_UART3, "mout_uart3", mout_group2_p, CLK_SRC4, 28, 4),
+ MUX(MOUT_UART2, "mout_uart2", mout_group2_p, CLK_SRC4, 24, 4),
+ MUX(MOUT_UART1, "mout_uart1", mout_group2_p, CLK_SRC4, 20, 4),
+ MUX(MOUT_UART0, "mout_uart0", mout_group2_p, CLK_SRC4, 16, 4),
+ MUX(MOUT_MMC3, "mout_mmc3", mout_group2_p, CLK_SRC4, 12, 4),
+ MUX(MOUT_MMC2, "mout_mmc2", mout_group2_p, CLK_SRC4, 8, 4),
+ MUX(MOUT_MMC1, "mout_mmc1", mout_group2_p, CLK_SRC4, 4, 4),
+ MUX(MOUT_MMC0, "mout_mmc0", mout_group2_p, CLK_SRC4, 0, 4),
+
+ MUX(MOUT_PWM, "mout_pwm", mout_group2_p, CLK_SRC5, 12, 4),
+ MUX(MOUT_SPI1, "mout_spi1", mout_group2_p, CLK_SRC5, 4, 4),
+ MUX(MOUT_SPI0, "mout_spi0", mout_group2_p, CLK_SRC5, 0, 4),
+
+ MUX(MOUT_DMC0, "mout_dmc0", mout_group1_p, CLK_SRC6, 24, 2),
+ MUX(MOUT_PWI, "mout_pwi", mout_group2_p, CLK_SRC6, 20, 4),
+ MUX(MOUT_HPM, "mout_hpm", mout_group3_p, CLK_SRC6, 16, 1),
+ MUX(MOUT_SPDIF, "mout_spdif", mout_spdif_p, CLK_SRC6, 12, 2),
+ MUX(MOUT_AUDIO2, "mout_audio2", mout_audio2_p, CLK_SRC6, 8, 4),
+ MUX(MOUT_AUDIO1, "mout_audio1", mout_audio1_p, CLK_SRC6, 4, 4),
+ MUX(MOUT_AUDIO0, "mout_audio0", mout_audio0_p, CLK_SRC6, 0, 4),
+
+ MUX(MOUT_CLKSEL, "mout_clksel", mout_clksel_p, CLK_OUT, 12, 5),
+};
+
+/* S5P6442-specific clock muxes. */
+static struct samsung_mux_clock s5p6442_mux_clks[] __initdata = {
+ MUX(MOUT_VPLL, "mout_vpll", mout_vpll_6442_p, CLK_SRC0, 12, 1),
+
+ MUX(MOUT_FIMD, "mout_fimd", mout_group2_6442_p, CLK_SRC1, 20, 4),
+ MUX(MOUT_CAM1, "mout_cam1", mout_group2_6442_p, CLK_SRC1, 16, 4),
+ MUX(MOUT_CAM0, "mout_cam0", mout_group2_6442_p, CLK_SRC1, 12, 4),
+ MUX(MOUT_MIXER, "mout_mixer", mout_mixer_6442_p, CLK_SRC1, 4, 1),
+
+ MUX(MOUT_D0SYNC, "mout_d0sync", mout_d0sync_6442_p, CLK_SRC2, 28, 1),
+ MUX(MOUT_D1SYNC, "mout_d1sync", mout_d1sync_6442_p, CLK_SRC2, 24, 1),
+
+ MUX(MOUT_FIMC2, "mout_fimc2", mout_group2_6442_p, CLK_SRC3, 20, 4),
+ MUX(MOUT_FIMC1, "mout_fimc1", mout_group2_6442_p, CLK_SRC3, 16, 4),
+ MUX(MOUT_FIMC0, "mout_fimc0", mout_group2_6442_p, CLK_SRC3, 12, 4),
+
+ MUX(MOUT_UART2, "mout_uart2", mout_group2_6442_p, CLK_SRC4, 24, 4),
+ MUX(MOUT_UART1, "mout_uart1", mout_group2_6442_p, CLK_SRC4, 20, 4),
+ MUX(MOUT_UART0, "mout_uart0", mout_group2_6442_p, CLK_SRC4, 16, 4),
+ MUX(MOUT_MMC2, "mout_mmc2", mout_group2_6442_p, CLK_SRC4, 8, 4),
+ MUX(MOUT_MMC1, "mout_mmc1", mout_group2_6442_p, CLK_SRC4, 4, 4),
+ MUX(MOUT_MMC0, "mout_mmc0", mout_group2_6442_p, CLK_SRC4, 0, 4),
+
+ MUX(MOUT_PWM, "mout_pwm", mout_group2_6442_p, CLK_SRC5, 12, 4),
+ MUX(MOUT_SPI0, "mout_spi0", mout_group2_6442_p, CLK_SRC5, 0, 4),
+
+ MUX(MOUT_AUDIO1, "mout_audio1", mout_audio1_6442_p, CLK_SRC6, 4, 4),
+ MUX(MOUT_AUDIO0, "mout_audio0", mout_audio0_6442_p, CLK_SRC6, 0, 4),
+
+ MUX(MOUT_CLKSEL, "mout_clksel", mout_clksel_6442_p, CLK_OUT, 12, 5),
+};
+
+/* S5PV210-specific fixed rate clocks generated inside the SoC. */
+static struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initdata = {
+ FRATE(SCLK_HDMI27M, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+ FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
+ FRATE(SCLK_USBPHY1, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
+};
+
+/* S5P6442-specific fixed rate clocks generated inside the SoC. */
+static struct samsung_fixed_rate_clock s5p6442_frate_clks[] __initdata = {
+ FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 30000000),
+};
+
+/* Common clock dividers. */
+static struct samsung_div_clock div_clks[] __initdata = {
+ DIV(DOUT_PCLKP, "dout_pclkp", "dout_hclkp", CLK_DIV0, 28, 3),
+ DIV(DOUT_PCLKD, "dout_pclkd", "dout_hclkd", CLK_DIV0, 20, 3),
+ DIV(DOUT_A2M, "dout_a2m", "mout_apll", CLK_DIV0, 4, 3),
+ DIV(DOUT_APLL, "dout_apll", "mout_msys", CLK_DIV0, 0, 3),
+
+ DIV(DOUT_FIMD, "dout_fimd", "mout_fimd", CLK_DIV1, 20, 4),
+ DIV(DOUT_CAM1, "dout_cam1", "mout_cam1", CLK_DIV1, 16, 4),
+ DIV(DOUT_CAM0, "dout_cam0", "mout_cam0", CLK_DIV1, 12, 4),
+
+ DIV(DOUT_FIMC2, "dout_fimc2", "mout_fimc2", CLK_DIV3, 20, 4),
+ DIV(DOUT_FIMC1, "dout_fimc1", "mout_fimc1", CLK_DIV3, 16, 4),
+ DIV(DOUT_FIMC0, "dout_fimc0", "mout_fimc0", CLK_DIV3, 12, 4),
+
+ DIV(DOUT_UART2, "dout_uart2", "mout_uart2", CLK_DIV4, 24, 4),
+ DIV(DOUT_UART1, "dout_uart1", "mout_uart1", CLK_DIV4, 20, 4),
+ DIV(DOUT_UART0, "dout_uart0", "mout_uart0", CLK_DIV4, 16, 4),
+ DIV(DOUT_MMC2, "dout_mmc2", "mout_mmc2", CLK_DIV4, 8, 4),
+ DIV(DOUT_MMC1, "dout_mmc1", "mout_mmc1", CLK_DIV4, 4, 4),
+ DIV(DOUT_MMC0, "dout_mmc0", "mout_mmc0", CLK_DIV4, 0, 4),
+
+ DIV(DOUT_PWM, "dout_pwm", "mout_pwm", CLK_DIV5, 12, 4),
+ DIV(DOUT_SPI0, "dout_spi0", "mout_spi0", CLK_DIV5, 0, 4),
+
+ DIV(DOUT_FLASH, "dout_flash", "mout_flash", CLK_DIV6, 12, 3),
+ DIV(DOUT_AUDIO1, "dout_audio1", "mout_audio1", CLK_DIV6, 4, 4),
+ DIV(DOUT_AUDIO0, "dout_audio0", "mout_audio0", CLK_DIV6, 0, 4),
+
+ DIV(DOUT_CLKOUT, "dout_clkout", "mout_clksel", CLK_OUT, 20, 4),
+};
+
+/* S5PV210-specific clock dividers. */
+static struct samsung_div_clock s5pv210_div_clks[] __initdata = {
+ DIV(DOUT_HCLKP, "dout_hclkp", "mout_psys", CLK_DIV0, 24, 4),
+ DIV(DOUT_HCLKD, "dout_hclkd", "mout_dsys", CLK_DIV0, 16, 4),
+ DIV(DOUT_PCLKM, "dout_pclkm", "dout_hclkm", CLK_DIV0, 12, 3),
+ DIV(DOUT_HCLKM, "dout_hclkm", "dout_apll", CLK_DIV0, 8, 3),
+
+ DIV(DOUT_CSIS, "dout_csis", "mout_csis", CLK_DIV1, 28, 4),
+ DIV(DOUT_TBLK, "dout_tblk", "mout_vpll", CLK_DIV1, 0, 4),
+
+ DIV(DOUT_G2D, "dout_g2d", "mout_g2d", CLK_DIV2, 8, 4),
+ DIV(DOUT_MFC, "dout_mfc", "mout_mfc", CLK_DIV2, 4, 4),
+ DIV(DOUT_G3D, "dout_g3d", "mout_g3d", CLK_DIV2, 0, 4),
+
+ DIV(DOUT_UART3, "dout_uart3", "mout_uart3", CLK_DIV4, 28, 4),
+ DIV(DOUT_MMC3, "dout_mmc3", "mout_mmc3", CLK_DIV4, 12, 4),
+
+ DIV(DOUT_SPI1, "dout_spi1", "mout_spi1", CLK_DIV5, 4, 4),
+
+ DIV(DOUT_DMC0, "dout_dmc0", "mout_dmc0", CLK_DIV6, 28, 4),
+ DIV(DOUT_PWI, "dout_pwi", "mout_pwi", CLK_DIV6, 24, 4),
+ DIV(DOUT_HPM, "dout_hpm", "dout_copy", CLK_DIV6, 20, 3),
+ DIV(DOUT_COPY, "dout_copy", "mout_hpm", CLK_DIV6, 16, 3),
+ DIV(DOUT_AUDIO2, "dout_audio2", "mout_audio2", CLK_DIV6, 8, 4),
+
+ DIV(DOUT_DPM, "dout_dpm", "dout_pclkp", CLK_DIV7, 8, 7),
+ DIV(DOUT_DVSEM, "dout_dvsem", "dout_pclkp", CLK_DIV7, 0, 7),
+};
+
+/* S5P6442-specific clock dividers. */
+static struct samsung_div_clock s5p6442_div_clks[] __initdata = {
+ DIV(DOUT_HCLKP, "dout_hclkp", "mout_d1sync", CLK_DIV0, 24, 4),
+ DIV(DOUT_HCLKD, "dout_hclkd", "mout_d0sync", CLK_DIV0, 16, 4),
+
+ DIV(DOUT_MIXER, "dout_mixer", "mout_vpll", CLK_DIV1, 0, 4),
+};
+
+/* Common clock gates. */
+static struct samsung_gate_clock gate_clks[] __initdata = {
+ GATE(CLK_ROTATOR, "rotator", "dout_hclkd", CLK_GATE_IP0, 29, 0, 0),
+ GATE(CLK_FIMC2, "fimc2", "dout_hclkd", CLK_GATE_IP0, 26, 0, 0),
+ GATE(CLK_FIMC1, "fimc1", "dout_hclkd", CLK_GATE_IP0, 25, 0, 0),
+ GATE(CLK_FIMC0, "fimc0", "dout_hclkd", CLK_GATE_IP0, 24, 0, 0),
+ GATE(CLK_PDMA0, "pdma0", "dout_hclkp", CLK_GATE_IP0, 3, 0, 0),
+ GATE(CLK_MDMA, "mdma", "dout_hclkd", CLK_GATE_IP0, 2, 0, 0),
+
+ GATE(CLK_SROMC, "sromc", "dout_hclkp", CLK_GATE_IP1, 26, 0, 0),
+ GATE(CLK_NANDXL, "nandxl", "dout_hclkp", CLK_GATE_IP1, 24, 0, 0),
+ GATE(CLK_USB_OTG, "usb_otg", "dout_hclkp", CLK_GATE_IP1, 16, 0, 0),
+ GATE(CLK_TVENC, "tvenc", "dout_hclkd", CLK_GATE_IP1, 10, 0, 0),
+ GATE(CLK_MIXER, "mixer", "dout_hclkd", CLK_GATE_IP1, 9, 0, 0),
+ GATE(CLK_VP, "vp", "dout_hclkd", CLK_GATE_IP1, 8, 0, 0),
+ GATE(CLK_FIMD, "fimd", "dout_hclkd", CLK_GATE_IP1, 0, 0, 0),
+
+ GATE(CLK_HSMMC2, "hsmmc2", "dout_hclkp", CLK_GATE_IP2, 18, 0, 0),
+ GATE(CLK_HSMMC1, "hsmmc1", "dout_hclkp", CLK_GATE_IP2, 17, 0, 0),
+ GATE(CLK_HSMMC0, "hsmmc0", "dout_hclkp", CLK_GATE_IP2, 16, 0, 0),
+ GATE(CLK_MODEMIF, "modemif", "dout_hclkp", CLK_GATE_IP2, 9, 0, 0),
+ GATE(CLK_SECSS, "secss", "dout_hclkp", CLK_GATE_IP2, 0, 0, 0),
+
+ GATE(CLK_PCM1, "pcm1", "dout_pclkp", CLK_GATE_IP3, 29, 0, 0),
+ GATE(CLK_PCM0, "pcm0", "dout_pclkp", CLK_GATE_IP3, 28, 0, 0),
+ GATE(CLK_TSADC, "tsadc", "dout_pclkp", CLK_GATE_IP3, 24, 0, 0),
+ GATE(CLK_PWM, "pwm", "dout_pclkp", CLK_GATE_IP3, 23, 0, 0),
+ GATE(CLK_WDT, "watchdog", "dout_pclkp", CLK_GATE_IP3, 22, 0, 0),
+ GATE(CLK_KEYIF, "keyif", "dout_pclkp", CLK_GATE_IP3, 21, 0, 0),
+ GATE(CLK_UART2, "uart2", "dout_pclkp", CLK_GATE_IP3, 19, 0, 0),
+ GATE(CLK_UART1, "uart1", "dout_pclkp", CLK_GATE_IP3, 18, 0, 0),
+ GATE(CLK_UART0, "uart0", "dout_pclkp", CLK_GATE_IP3, 17, 0, 0),
+ GATE(CLK_SYSTIMER, "systimer", "dout_pclkp", CLK_GATE_IP3, 16, 0, 0),
+ GATE(CLK_RTC, "rtc", "dout_pclkp", CLK_GATE_IP3, 15, 0, 0),
+ GATE(CLK_SPI0, "spi0", "dout_pclkp", CLK_GATE_IP3, 12, 0, 0),
+ GATE(CLK_I2C2, "i2c2", "dout_pclkp", CLK_GATE_IP3, 9, 0, 0),
+ GATE(CLK_I2C0, "i2c0", "dout_pclkp", CLK_GATE_IP3, 7, 0, 0),
+ GATE(CLK_I2S1, "i2s1", "dout_pclkp", CLK_GATE_IP3, 5, 0, 0),
+ GATE(CLK_I2S0, "i2s0", "dout_pclkp", CLK_GATE_IP3, 4, 0, 0),
+
+ GATE(CLK_SECKEY, "seckey", "dout_pclkp", CLK_GATE_IP4, 3, 0, 0),
+ GATE(CLK_CHIPID, "chipid", "dout_pclkp", CLK_GATE_IP4, 0, 0, 0),
+
+ GATE(SCLK_AUDIO1, "sclk_audio1", "dout_audio1", CLK_SRC_MASK0, 25,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_AUDIO0, "sclk_audio0", "dout_audio0", CLK_SRC_MASK0, 24,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_PWM, "sclk_pwm", "dout_pwm", CLK_SRC_MASK0, 19,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_SPI0, "sclk_spi0", "dout_spi0", CLK_SRC_MASK0, 16,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_UART2, "sclk_uart2", "dout_uart2", CLK_SRC_MASK0, 14,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_UART1, "sclk_uart1", "dout_uart1", CLK_SRC_MASK0, 13,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_UART0, "sclk_uart0", "dout_uart0", CLK_SRC_MASK0, 12,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_MMC2, "sclk_mmc2", "dout_mmc2", CLK_SRC_MASK0, 10,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_MMC1, "sclk_mmc1", "dout_mmc1", CLK_SRC_MASK0, 9,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_MMC0, "sclk_mmc0", "dout_mmc0", CLK_SRC_MASK0, 8,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_FIMD, "sclk_fimd", "dout_fimd", CLK_SRC_MASK0, 5,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_CAM1, "sclk_cam1", "dout_cam1", CLK_SRC_MASK0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_CAM0, "sclk_cam0", "dout_cam0", CLK_SRC_MASK0, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_MIXER, "sclk_mixer", "mout_mixer", CLK_SRC_MASK0, 1,
+ CLK_SET_RATE_PARENT, 0),
+
+ GATE(SCLK_FIMC2, "sclk_fimc2", "dout_fimc2", CLK_SRC_MASK1, 4,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_FIMC1, "sclk_fimc1", "dout_fimc1", CLK_SRC_MASK1, 3,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_FIMC0, "sclk_fimc0", "dout_fimc0", CLK_SRC_MASK1, 2,
+ CLK_SET_RATE_PARENT, 0),
+};
+
+/* S5PV210-specific clock gates. */
+static struct samsung_gate_clock s5pv210_gate_clks[] __initdata = {
+ GATE(CLK_CSIS, "clk_csis", "dout_hclkd", CLK_GATE_IP0, 31, 0, 0),
+ GATE(CLK_MFC, "mfc", "dout_hclkm", CLK_GATE_IP0, 16, 0, 0),
+ GATE(CLK_G2D, "g2d", "dout_hclkd", CLK_GATE_IP0, 12, 0, 0),
+ GATE(CLK_G3D, "g3d", "dout_hclkm", CLK_GATE_IP0, 8, 0, 0),
+ GATE(CLK_IMEM, "imem", "dout_hclkm", CLK_GATE_IP0, 5, 0, 0),
+ GATE(CLK_PDMA1, "pdma1", "dout_hclkp", CLK_GATE_IP0, 4, 0, 0),
+
+ GATE(CLK_NFCON, "nfcon", "dout_hclkp", CLK_GATE_IP1, 28, 0, 0),
+ GATE(CLK_CFCON, "cfcon", "dout_hclkp", CLK_GATE_IP1, 25, 0, 0),
+ GATE(CLK_USB_HOST, "usb_host", "dout_hclkp", CLK_GATE_IP1, 17, 0, 0),
+ GATE(CLK_HDMI, "hdmi", "dout_hclkd", CLK_GATE_IP1, 11, 0, 0),
+ GATE(CLK_DSIM, "dsim", "dout_pclkd", CLK_GATE_IP1, 2, 0, 0),
+
+ GATE(CLK_TZIC3, "tzic3", "dout_hclkm", CLK_GATE_IP2, 31, 0, 0),
+ GATE(CLK_TZIC2, "tzic2", "dout_hclkm", CLK_GATE_IP2, 30, 0, 0),
+ GATE(CLK_TZIC1, "tzic1", "dout_hclkm", CLK_GATE_IP2, 29, 0, 0),
+ GATE(CLK_TZIC0, "tzic0", "dout_hclkm", CLK_GATE_IP2, 28, 0, 0),
+ GATE(CLK_TSI, "tsi", "dout_hclkd", CLK_GATE_IP2, 20, 0, 0),
+ GATE(CLK_HSMMC3, "hsmmc3", "dout_hclkp", CLK_GATE_IP2, 19, 0, 0),
+ GATE(CLK_JTAG, "jtag", "dout_hclkp", CLK_GATE_IP2, 11, 0, 0),
+ GATE(CLK_CORESIGHT, "coresight", "dout_pclkp", CLK_GATE_IP2, 8, 0, 0),
+ GATE(CLK_SDM, "sdm", "dout_pclkm", CLK_GATE_IP2, 1, 0, 0),
+
+ GATE(CLK_PCM2, "pcm2", "dout_pclkp", CLK_GATE_IP3, 30, 0, 0),
+ GATE(CLK_UART3, "uart3", "dout_pclkp", CLK_GATE_IP3, 20, 0, 0),
+ GATE(CLK_SPI1, "spi1", "dout_pclkp", CLK_GATE_IP3, 13, 0, 0),
+ GATE(CLK_I2C_HDMI_PHY, "i2c_hdmi_phy", "dout_pclkd",
+ CLK_GATE_IP3, 11, 0, 0),
+ GATE(CLK_I2C1, "i2c1", "dout_pclkd", CLK_GATE_IP3, 10, 0, 0),
+ GATE(CLK_I2S2, "i2s2", "dout_pclkp", CLK_GATE_IP3, 6, 0, 0),
+ GATE(CLK_AC97, "ac97", "dout_pclkp", CLK_GATE_IP3, 1, 0, 0),
+ GATE(CLK_SPDIF, "spdif", "dout_pclkp", CLK_GATE_IP3, 0, 0, 0),
+
+ GATE(CLK_TZPC3, "tzpc.3", "dout_pclkd", CLK_GATE_IP4, 8, 0, 0),
+ GATE(CLK_TZPC2, "tzpc.2", "dout_pclkd", CLK_GATE_IP4, 7, 0, 0),
+ GATE(CLK_TZPC1, "tzpc.1", "dout_pclkp", CLK_GATE_IP4, 6, 0, 0),
+ GATE(CLK_TZPC0, "tzpc.0", "dout_pclkm", CLK_GATE_IP4, 5, 0, 0),
+ GATE(CLK_IEM_APC, "iem_apc", "dout_pclkp", CLK_GATE_IP4, 2, 0, 0),
+ GATE(CLK_IEM_IEC, "iem_iec", "dout_pclkp", CLK_GATE_IP4, 1, 0, 0),
+
+ GATE(CLK_JPEG, "jpeg", "dout_hclkd", CLK_GATE_IP5, 29, 0, 0),
+
+ GATE(SCLK_SPDIF, "sclk_spdif", "mout_spdif", CLK_SRC_MASK0, 27,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_AUDIO2, "sclk_audio2", "dout_audio2", CLK_SRC_MASK0, 26,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_SPI1, "sclk_spi1", "dout_spi1", CLK_SRC_MASK0, 17,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_UART3, "sclk_uart3", "dout_uart3", CLK_SRC_MASK0, 15,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_MMC3, "sclk_mmc3", "dout_mmc3", CLK_SRC_MASK0, 11,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_CSIS, "sclk_csis", "dout_csis", CLK_SRC_MASK0, 6,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_DAC, "sclk_dac", "mout_dac", CLK_SRC_MASK0, 2,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(SCLK_HDMI, "sclk_hdmi", "mout_hdmi", CLK_SRC_MASK0, 0,
+ CLK_SET_RATE_PARENT, 0),
+};
+
+/* S5P6442-specific clock gates. */
+static struct samsung_gate_clock s5p6442_gate_clks[] __initdata = {
+ GATE(CLK_JPEG, "jpeg", "dout_hclkd", CLK_GATE_IP0, 28, 0, 0),
+ GATE(CLK_MFC, "mfc", "dout_hclkd", CLK_GATE_IP0, 16, 0, 0),
+ GATE(CLK_G2D, "g2d", "dout_hclkd", CLK_GATE_IP0, 12, 0, 0),
+ GATE(CLK_G3D, "g3d", "dout_hclkd", CLK_GATE_IP0, 8, 0, 0),
+ GATE(CLK_IMEM, "imem", "dout_hclkd", CLK_GATE_IP0, 5, 0, 0),
+
+ GATE(CLK_ETB, "etb", "dout_hclkd", CLK_GATE_IP1, 31, 0, 0),
+ GATE(CLK_ETM, "etm", "dout_hclkd", CLK_GATE_IP1, 30, 0, 0),
+
+ GATE(CLK_I2C1, "i2c1", "dout_pclkp", CLK_GATE_IP3, 8, 0, 0),
+
+ GATE(SCLK_DAC, "sclk_dac", "mout_vpll", CLK_SRC_MASK0, 2,
+ CLK_SET_RATE_PARENT, 0),
+};
+
+/*
+ * Clock aliases for legacy clkdev look-up.
+ * NOTE: Needed only to support legacy board files.
+ */
+static struct samsung_clock_alias s5pv210_aliases[] = {
+ ALIAS(DOUT_APLL, NULL, "armclk"),
+ ALIAS(DOUT_HCLKM, NULL, "hclk_msys"),
+ ALIAS(MOUT_DMC0, NULL, "sclk_dmc0"),
+};
+
+/* S5PV210-specific PLLs. */
+static struct samsung_pll_clock s5pv210_pll_clks[] __initdata = {
+ [apll] = PLL(pll_4508, FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, NULL),
+ [mpll] = PLL(pll_4502, FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON, NULL),
+ [epll] = PLL(pll_4600, FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
+ [vpll] = PLL(pll_4502, FOUT_VPLL, "fout_vpll", "mout_vpllsrc",
+ VPLL_LOCK, VPLL_CON, NULL),
+};
+
+/* S5P6442-specific PLLs. */
+static struct samsung_pll_clock s5p6442_pll_clks[] __initdata = {
+ [apll] = PLL(pll_4502, FOUT_APLL, "fout_apll", "fin_pll",
+ APLL_LOCK, APLL_CON0, NULL),
+ [mpll] = PLL(pll_4502, FOUT_MPLL, "fout_mpll", "fin_pll",
+ MPLL_LOCK, MPLL_CON, NULL),
+ [epll] = PLL(pll_4500, FOUT_EPLL, "fout_epll", "fin_pll",
+ EPLL_LOCK, EPLL_CON0, NULL),
+ [vpll] = PLL(pll_4500, FOUT_VPLL, "fout_vpll", "fin_pll",
+ VPLL_LOCK, VPLL_CON, NULL),
+};
+
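+/*
+ * Register the common clocks plus either the S5PV210- or the S5P6442-
+ * specific ones, depending on the is_s5p6442 flag.
+ */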
+static void __init __s5pv210_clk_init(struct device_node *np,
+ unsigned long xxti_f,
+ unsigned long xusbxti_f,
+ bool is_s5p6442)
+{
+ struct samsung_clk_provider *ctx;
+
+ ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ if (!ctx)
+ panic("%s: unable to allocate context.\n", __func__);
+
+ samsung_clk_register_mux(ctx, early_mux_clks,
+ ARRAY_SIZE(early_mux_clks));
+
+ if (is_s5p6442) {
+ samsung_clk_register_fixed_rate(ctx, s5p6442_frate_clks,
+ ARRAY_SIZE(s5p6442_frate_clks));
+ samsung_clk_register_pll(ctx, s5p6442_pll_clks,
+ ARRAY_SIZE(s5p6442_pll_clks), reg_base);
+ samsung_clk_register_mux(ctx, s5p6442_mux_clks,
+ ARRAY_SIZE(s5p6442_mux_clks));
+ samsung_clk_register_div(ctx, s5p6442_div_clks,
+ ARRAY_SIZE(s5p6442_div_clks));
+ samsung_clk_register_gate(ctx, s5p6442_gate_clks,
+ ARRAY_SIZE(s5p6442_gate_clks));
+ } else {
+ samsung_clk_register_fixed_rate(ctx, s5pv210_frate_clks,
+ ARRAY_SIZE(s5pv210_frate_clks));
+ samsung_clk_register_pll(ctx, s5pv210_pll_clks,
+ ARRAY_SIZE(s5pv210_pll_clks), reg_base);
+ samsung_clk_register_mux(ctx, s5pv210_mux_clks,
+ ARRAY_SIZE(s5pv210_mux_clks));
+ samsung_clk_register_div(ctx, s5pv210_div_clks,
+ ARRAY_SIZE(s5pv210_div_clks));
+ samsung_clk_register_gate(ctx, s5pv210_gate_clks,
+ ARRAY_SIZE(s5pv210_gate_clks));
+ }
+
+ samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks));
+ samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
+ samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
+
+ samsung_clk_register_fixed_factor(ctx, ffactor_clks,
+ ARRAY_SIZE(ffactor_clks));
+
+ samsung_clk_register_alias(ctx, s5pv210_aliases,
+ ARRAY_SIZE(s5pv210_aliases));
+
+ s5pv210_clk_sleep_init();
+
+ pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n"
+ "\tmout_epll = %ld, mout_vpll = %ld\n",
+ is_s5p6442 ? "S5P6442" : "S5PV210",
+ _get_rate("mout_apll"), _get_rate("mout_mpll"),
+ _get_rate("mout_epll"), _get_rate("mout_vpll"));
+}
+
+static void __init s5pv210_clk_dt_init(struct device_node *np)
+{
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ __s5pv210_clk_init(np, 0, 0, false);
+}
+CLK_OF_DECLARE(s5pv210_clk, "samsung,s5pv210-clock", s5pv210_clk_dt_init);
+
+static void __init s5p6442_clk_dt_init(struct device_node *np)
+{
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ __s5pv210_clk_init(np, 0, 0, true);
+}
+CLK_OF_DECLARE(s5p6442_clk, "samsung,s5p6442-clock", s5p6442_clk_dt_init);
diff --git a/drivers/clk/tegra/clk-periph-gate.c b/drivers/clk/tegra/clk-periph-gate.c
index 507015314827..0aa8830ae7cc 100644
--- a/drivers/clk/tegra/clk-periph-gate.c
+++ b/drivers/clk/tegra/clk-periph-gate.c
@@ -20,7 +20,8 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/tegra-soc.h>
+
+#include <soc/tegra/fuse.h>
#include "clk.h"
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 8b10c38b6e3c..5bbacd01094f 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -22,8 +22,11 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/tegra.h>
-#include <linux/tegra-powergate.h>
+
+#include <soc/tegra/pmc.h>
+
#include <dt-bindings/clock/tegra30-car.h>
+
#include "clk.h"
#include "clk-id.h"
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index bf452b62beb8..f87c609e8f72 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -19,7 +19,8 @@
#include <linux/of.h>
#include <linux/clk/tegra.h>
#include <linux/reset-controller.h>
-#include <linux/tegra-soc.h>
+
+#include <soc/tegra/fuse.h>
#include "clk.h"
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index fd449f9b006d..162e519cb0f9 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -1,6 +1,5 @@
# Makefile for Versatile-specific clocks
-obj-$(CONFIG_ICST) += clk-icst.o
-obj-$(CONFIG_ARCH_INTEGRATOR) += clk-integrator.o
+obj-$(CONFIG_ICST) += clk-icst.o clk-versatile.o
obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o
obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o
diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-versatile.c
index 734c4b8fe6ab..a76981e88cb6 100644
--- a/drivers/clk/versatile/clk-integrator.c
+++ b/drivers/clk/versatile/clk-versatile.c
@@ -1,5 +1,6 @@
/*
- * Clock driver for the ARM Integrator/AP and Integrator/CP boards
+ * Clock driver for the ARM Integrator/AP, Integrator/CP, Versatile AB and
+ * Versatile PB boards.
* Copyright (C) 2012 Linus Walleij
*
* This program is free software; you can redistribute it and/or modify
@@ -17,6 +18,9 @@
#define INTEGRATOR_HDR_LOCK_OFFSET 0x14
+#define VERSATILE_SYS_OSCCLCD_OFFSET 0x1c
+#define VERSATILE_SYS_LOCK_OFFSET 0x20
+
/* Base offset for the core module */
static void __iomem *cm_base;
@@ -37,11 +41,27 @@ static const struct clk_icst_desc __initdata cm_auxosc_desc = {
.lock_offset = INTEGRATOR_HDR_LOCK_OFFSET,
};
-static void __init of_integrator_cm_osc_setup(struct device_node *np)
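+/* ICST307 parameters for the Versatile auxiliary oscillator; the vd/rd limits include the hardware encoding offsets (+8, +2). */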
+static const struct icst_params versatile_auxosc_params = {
+ .vco_max = ICST307_VCO_MAX,
+ .vco_min = ICST307_VCO_MIN,
+ .vd_min = 4 + 8,
+ .vd_max = 511 + 8,
+ .rd_min = 1 + 2,
+ .rd_max = 127 + 2,
+ .s2div = icst307_s2div,
+ .idx2s = icst307_idx2s,
+};
+
+static const struct clk_icst_desc versatile_auxosc_desc __initconst = {
+ .params = &versatile_auxosc_params,
+ .vco_offset = VERSATILE_SYS_OSCCLCD_OFFSET,
+ .lock_offset = VERSATILE_SYS_LOCK_OFFSET,
+};
+static void __init cm_osc_setup(struct device_node *np,
+ const struct clk_icst_desc *desc)
{
struct clk *clk = ERR_PTR(-EINVAL);
const char *clk_name = np->name;
- const struct clk_icst_desc *desc = &cm_auxosc_desc;
const char *parent_name;
if (!cm_base) {
@@ -65,5 +85,17 @@ static void __init of_integrator_cm_osc_setup(struct device_node *np)
if (!IS_ERR(clk))
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+
+static void __init of_integrator_cm_osc_setup(struct device_node *np)
+{
+ cm_osc_setup(np, &cm_auxosc_desc);
+}
CLK_OF_DECLARE(integrator_cm_auxosc_clk,
"arm,integrator-cm-auxosc", of_integrator_cm_osc_setup);
+
+static void __init of_versatile_cm_osc_setup(struct device_node *np)
+{
+ cm_osc_setup(np, &versatile_auxosc_desc);
+}
+CLK_OF_DECLARE(versatile_cm_auxosc_clk,
+ "arm,versatile-cm-auxosc", of_versatile_cm_osc_setup);
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/tegra20_timer.c
index d1869f02051c..d2616ef16770 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/tegra20_timer.c
@@ -27,6 +27,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#include <linux/delay.h>
#include <asm/mach/time.h>
#include <asm/smp_twd.h>
@@ -53,6 +54,8 @@ static void __iomem *rtc_base;
static struct timespec persistent_ts;
static u64 persistent_ms, last_persistent_ms;
+static struct delay_timer tegra_delay_timer;
+
#define timer_writel(value, reg) \
__raw_writel(value, timer_reg_base + (reg))
#define timer_readl(reg) \
@@ -139,6 +142,11 @@ static void tegra_read_persistent_clock(struct timespec *ts)
*ts = *tsp;
}
+static unsigned long tegra_delay_timer_read_counter_long(void)
+{
+ return readl(timer_reg_base + TIMERUS_CNTR_1US);
+}
+
static irqreturn_t tegra_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
@@ -206,6 +214,11 @@ static void __init tegra20_init_timer(struct device_node *np)
BUG();
}
+ tegra_delay_timer.read_current_timer =
+ tegra_delay_timer_read_counter_long;
+ tegra_delay_timer.freq = 1000000;
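+ /* TIMERUS_CNTR_1US is a free-running microsecond counter, so the delay timer runs at 1 MHz. */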
+ register_current_timer_delay(&tegra_delay_timer);
+
ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
if (ret) {
pr_err("Failed to register timer IRQ: %d\n", ret);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6f024852c6fb..d9fdeddcef96 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1076,10 +1076,20 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
kfree(policy);
}
-static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
+ struct device *cpu_dev)
{
+ int ret;
+
if (WARN_ON(cpu == policy->cpu))
- return;
+ return 0;
+
+ /* Move kobject to the new policy->cpu */
+ ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
+ return ret;
+ }
down_write(&policy->rwsem);
@@ -1090,6 +1100,8 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_UPDATE_POLICY_CPU, policy);
+
+ return 0;
}
static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
@@ -1153,12 +1165,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
* the creation of a brand new one. So we need to perform this update
* by invoking update_policy_cpu().
*/
- if (recover_policy && cpu != policy->cpu) {
- update_policy_cpu(policy, cpu);
- WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
- } else {
+ if (recover_policy && cpu != policy->cpu)
+ WARN_ON(update_policy_cpu(policy, cpu, dev));
+ else
policy->cpu = cpu;
- }
cpumask_copy(policy->cpus, cpumask_of(cpu));
@@ -1309,38 +1319,11 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return __cpufreq_add_dev(dev, sif);
}
-static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
- unsigned int old_cpu)
-{
- struct device *cpu_dev;
- int ret;
-
- /* first sibling now owns the new sysfs dir */
- cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
-
- sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
- ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
- if (ret) {
- pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
-
- down_write(&policy->rwsem);
- cpumask_set_cpu(old_cpu, policy->cpus);
- up_write(&policy->rwsem);
-
- ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
- "cpufreq");
-
- return -EINVAL;
- }
-
- return cpu_dev->id;
-}
-
static int __cpufreq_remove_dev_prepare(struct device *dev,
struct subsys_interface *sif)
{
unsigned int cpu = dev->id, cpus;
- int new_cpu, ret;
+ int ret;
unsigned long flags;
struct cpufreq_policy *policy;
@@ -1380,14 +1363,23 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (cpu != policy->cpu) {
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
- new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
- if (new_cpu >= 0) {
- update_policy_cpu(policy, new_cpu);
+ /* Nominate new CPU */
+ int new_cpu = cpumask_any_but(policy->cpus, cpu);
+ struct device *cpu_dev = get_cpu_device(new_cpu);
- if (!cpufreq_suspended)
- pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
- __func__, new_cpu, cpu);
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = update_policy_cpu(policy, new_cpu, cpu_dev);
+ if (ret) {
+ if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+ "cpufreq"))
+ pr_err("%s: Failed to restore kobj link to cpu:%d\n",
+ __func__, cpu_dev->id);
+ return ret;
}
+
+ if (!cpufreq_suspended)
+ pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+ __func__, new_cpu, cpu);
} else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
cpufreq_driver->stop_cpu(policy);
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 18d409189092..ad3f38fd3eb9 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -170,21 +170,24 @@ static void od_check_cpu(int cpu, unsigned int load)
dbs_freq_increase(policy, policy->max);
} else {
/* Calculate the next frequency proportional to load */
- unsigned int freq_next;
- freq_next = load * policy->cpuinfo.max_freq / 100;
+ unsigned int freq_next, min_f, max_f;
+
+ min_f = policy->cpuinfo.min_freq;
+ max_f = policy->cpuinfo.max_freq;
+ freq_next = min_f + load * (max_f - min_f) / 100;
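+ /* e.g. min_f = 800000, max_f = 2000000 (kHz) and load = 50 yield freq_next = 1400000 kHz */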
/* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1;
if (!od_tuners->powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
- CPUFREQ_RELATION_L);
+ CPUFREQ_RELATION_C);
return;
}
freq_next = od_ops.powersave_bias_target(policy, freq_next,
CPUFREQ_RELATION_L);
- __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
}
}
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 1632981c4b25..df14766a8e06 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -117,7 +117,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
.frequency = 0,
};
struct cpufreq_frequency_table *pos;
- unsigned int freq, i = 0;
+ unsigned int freq, diff, i = 0;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
target_freq, relation, policy->cpu);
@@ -127,6 +127,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
suboptimal.frequency = ~0;
break;
case CPUFREQ_RELATION_L:
+ case CPUFREQ_RELATION_C:
optimal.frequency = ~0;
break;
}
@@ -168,6 +169,15 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
}
break;
+ case CPUFREQ_RELATION_C:
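+ /* Pick the frequency closest to the target; optimal.frequency temporarily stores the smallest diff. */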
+ diff = abs(freq - target_freq);
+ if (diff < optimal.frequency ||
+ (diff == optimal.frequency &&
+ freq > table[optimal.driver_data].frequency)) {
+ optimal.frequency = diff;
+ optimal.driver_data = i;
+ }
+ break;
}
}
if (optimal.driver_data > i) {
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index af366c21d4b4..c2d30765bf3d 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -66,10 +66,12 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* scaling up? scale voltage before frequency */
if (new_freq > old_freq) {
- ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
- if (ret) {
- dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
- return ret;
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+ return ret;
+ }
}
ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
if (ret) {
@@ -121,10 +123,12 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
ret = 0;
}
- ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
- if (ret) {
- dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
- ret = 0;
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+ if (ret) {
+ dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+ ret = 0;
+ }
}
}
@@ -182,9 +186,9 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
}
arm_reg = regulator_get(cpu_dev, "arm");
- pu_reg = regulator_get(cpu_dev, "pu");
+ pu_reg = regulator_get_optional(cpu_dev, "pu");
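+ /* The "pu" supply is optional; when absent, IS_ERR(pu_reg) holds and the vddpu scaling steps are skipped. */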
soc_reg = regulator_get(cpu_dev, "soc");
- if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
+ if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
dev_err(cpu_dev, "failed to get regulators\n");
ret = -ENOENT;
goto put_reg;
@@ -268,9 +272,11 @@ soc_opp_out:
ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
if (ret > 0)
transition_latency += ret * 1000;
- ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
- if (ret > 0)
- transition_latency += ret * 1000;
+ if (!IS_ERR(pu_reg)) {
+ ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+ if (ret > 0)
+ transition_latency += ret * 1000;
+ }
/*
* OPP is maintained in order of increasing frequency, and
@@ -327,7 +333,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
regulator_put(arm_reg);
- regulator_put(pu_reg);
+ if (!IS_ERR(pu_reg))
+ regulator_put(pu_reg);
regulator_put(soc_reg);
clk_put(arm_clk);
clk_put(pll1_sys_clk);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 86631cb6f7de..c5eac949760d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,7 +37,6 @@
#define BYT_TURBO_RATIOS 0x66c
#define BYT_TURBO_VIDS 0x66d
-
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
@@ -50,7 +49,7 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
static inline int32_t div_fp(int32_t x, int32_t y)
{
- return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
+ return div_s64((int64_t)x << FRAC_BITS, y);
}
struct sample {
@@ -148,7 +147,7 @@ static struct perf_limits limits = {
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
- int deadband, int integral) {
+ int deadband, int integral) {
pid->setpoint = setpoint;
pid->deadband = deadband;
pid->integral = int_tofp(integral);
@@ -167,7 +166,6 @@ static inline void pid_i_gain_set(struct _pid *pid, int percent)
static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
-
pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
@@ -207,16 +205,13 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
- pid_reset(&cpu->pid,
- pid_params.setpoint,
- 100,
- pid_params.deadband,
- 0);
+ pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}
static inline void intel_pstate_reset_all_pid(void)
{
unsigned int cpu;
+
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu])
intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
@@ -230,13 +225,13 @@ static int pid_param_set(void *data, u64 val)
intel_pstate_reset_all_pid();
return 0;
}
+
static int pid_param_get(void *data, u64 *val)
{
*val = *(u32 *)data;
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
- pid_param_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
struct pid_param {
char *name;
@@ -253,9 +248,9 @@ static struct pid_param pid_files[] = {
{NULL, NULL}
};
-static struct dentry *debugfs_parent;
-static void intel_pstate_debug_expose_params(void)
+static void __init intel_pstate_debug_expose_params(void)
{
+ struct dentry *debugfs_parent;
int i = 0;
debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
@@ -263,8 +258,8 @@ static void intel_pstate_debug_expose_params(void)
return;
while (pid_files[i].name) {
debugfs_create_file(pid_files[i].name, 0660,
- debugfs_parent, pid_files[i].value,
- &fops_pid_param);
+ debugfs_parent, pid_files[i].value,
+ &fops_pid_param);
i++;
}
}
@@ -280,10 +275,11 @@ static void intel_pstate_debug_expose_params(void)
}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
+
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -296,10 +292,11 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
+
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -307,14 +304,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+
return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
- const char *buf, size_t count)
+ const char *buf, size_t count)
{
unsigned int input;
int ret;
+
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
@@ -342,17 +341,16 @@ static struct attribute *intel_pstate_attributes[] = {
static struct attribute_group intel_pstate_attr_group = {
.attrs = intel_pstate_attributes,
};
-static struct kobject *intel_pstate_kobject;
-static void intel_pstate_sysfs_expose_params(void)
+static void __init intel_pstate_sysfs_expose_params(void)
{
+ struct kobject *intel_pstate_kobject;
int rc;
intel_pstate_kobject = kobject_create_and_add("intel_pstate",
&cpu_subsys.dev_root->kobj);
BUG_ON(!intel_pstate_kobject);
- rc = sysfs_create_group(intel_pstate_kobject,
- &intel_pstate_attr_group);
+ rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
BUG_ON(rc);
}
@@ -360,6 +358,7 @@ static void intel_pstate_sysfs_expose_params(void)
static int byt_get_min_pstate(void)
{
u64 value;
+
rdmsrl(BYT_RATIOS, value);
return (value >> 8) & 0x7F;
}
@@ -367,6 +366,7 @@ static int byt_get_min_pstate(void)
static int byt_get_max_pstate(void)
{
u64 value;
+
rdmsrl(BYT_RATIOS, value);
return (value >> 16) & 0x7F;
}
@@ -374,6 +374,7 @@ static int byt_get_max_pstate(void)
static int byt_get_turbo_pstate(void)
{
u64 value;
+
rdmsrl(BYT_TURBO_RATIOS, value);
return value & 0x7F;
}
@@ -407,7 +408,6 @@ static void byt_get_vid(struct cpudata *cpudata)
{
u64 value;
-
rdmsrl(BYT_VIDS, value);
cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
@@ -420,10 +420,10 @@ static void byt_get_vid(struct cpudata *cpudata)
cpudata->vid.turbo = value & 0x7f;
}
-
static int core_get_min_pstate(void)
{
u64 value;
+
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 40) & 0xFF;
}
@@ -431,6 +431,7 @@ static int core_get_min_pstate(void)
static int core_get_max_pstate(void)
{
u64 value;
+
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 8) & 0xFF;
}
@@ -439,9 +440,10 @@ static int core_get_turbo_pstate(void)
{
u64 value;
int nont, ret;
+
rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
nont = core_get_max_pstate();
- ret = ((value) & 255);
+ ret = (value) & 255;
if (ret <= nont)
ret = nont;
return ret;
@@ -493,12 +495,12 @@ static struct cpu_defaults byt_params = {
},
};
-
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
int max_perf = cpu->pstate.turbo_pstate;
int max_perf_adj;
int min_perf;
+
if (limits.no_turbo)
max_perf = cpu->pstate.max_pstate;
@@ -507,8 +509,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
- *min = clamp_t(int, min_perf,
- cpu->pstate.min_pstate, max_perf);
+ *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
@@ -529,21 +530,6 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
pstate_funcs.set(cpu, pstate);
}
-static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
-{
- int target;
- target = cpu->pstate.current_pstate + steps;
-
- intel_pstate_set_pstate(cpu, target);
-}
-
-static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
-{
- int target;
- target = cpu->pstate.current_pstate - steps;
- intel_pstate_set_pstate(cpu, target);
-}
-
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -559,13 +545,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
int64_t core_pct;
- int32_t rem;
core_pct = int_tofp(sample->aperf) * int_tofp(100);
- core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem);
-
- if ((rem << 1) >= int_tofp(sample->mperf))
- core_pct += 1;
+ core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
sample->freq = fp_toint(
mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
@@ -576,12 +558,12 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
static inline void intel_pstate_sample(struct cpudata *cpu)
{
u64 aperf, mperf;
+ unsigned long flags;
+ local_irq_save(flags);
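+ /* Keep the two MSR reads close together with interrupts off so the APERF/MPERF ratio is not skewed. */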
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
-
- aperf = aperf >> FRAC_BITS;
- mperf = mperf >> FRAC_BITS;
+ local_irq_restore(flags);
cpu->last_sample_time = cpu->sample.time;
cpu->sample.time = ktime_get();
@@ -598,10 +580,9 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
- int sample_time, delay;
+ int delay;
- sample_time = pid_params.sample_rate_ms;
- delay = msecs_to_jiffies(sample_time);
+ delay = msecs_to_jiffies(pid_params.sample_rate_ms);
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
@@ -616,12 +597,12 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
current_pstate = int_tofp(cpu->pstate.current_pstate);
core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
- sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC);
+ sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
duration_us = (u32) ktime_us_delta(cpu->sample.time,
- cpu->last_sample_time);
+ cpu->last_sample_time);
if (duration_us > sample_time * 3) {
sample_ratio = div_fp(int_tofp(sample_time),
- int_tofp(duration_us));
+ int_tofp(duration_us));
core_busy = mul_fp(core_busy, sample_ratio);
}
@@ -632,20 +613,15 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
int32_t busy_scaled;
struct _pid *pid;
- signed int ctl = 0;
- int steps;
+ signed int ctl;
pid = &cpu->pid;
busy_scaled = intel_pstate_get_scaled_busy(cpu);
ctl = pid_calc(pid, busy_scaled);
- steps = abs(ctl);
-
- if (ctl < 0)
- intel_pstate_pstate_increase(cpu, steps);
- else
- intel_pstate_pstate_decrease(cpu, steps);
+ /* Negative values of ctl increase the pstate and vice versa */
+ intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
}
static void intel_pstate_timer_func(unsigned long __data)
@@ -705,8 +681,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
init_timer_deferrable(&cpu->timer);
cpu->timer.function = intel_pstate_timer_func;
- cpu->timer.data =
- (unsigned long)cpu;
+ cpu->timer.data = (unsigned long)cpu;
cpu->timer.expires = jiffies + HZ/100;
intel_pstate_busy_pid_reset(cpu);
intel_pstate_sample(cpu);
@@ -751,7 +726,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
- limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+ limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
@@ -763,8 +738,8 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
cpufreq_verify_within_cpu_limits(policy);
- if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
- (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
+ if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
+ policy->policy != CPUFREQ_POLICY_PERFORMANCE)
return -EINVAL;
return 0;
@@ -797,7 +772,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
- cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
+ cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
limits.turbo_disabled = 1;
limits.no_turbo = 1;
}
@@ -839,8 +814,8 @@ static int intel_pstate_msrs_not_valid(void)
rdmsrl(MSR_IA32_MPERF, mperf);
if (!pstate_funcs.get_max() ||
- !pstate_funcs.get_min() ||
- !pstate_funcs.get_turbo())
+ !pstate_funcs.get_min() ||
+ !pstate_funcs.get_turbo())
return -ENODEV;
rdmsrl(MSR_IA32_APERF, tmp);
@@ -922,14 +897,14 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
struct acpi_table_header hdr;
struct hw_vendor_info *v_info;
- if (acpi_disabled
- || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+ if (acpi_disabled ||
+ ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
return false;
for (v_info = vendor_info; v_info->valid; v_info++) {
- if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
- && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
- && intel_pstate_no_acpi_pss())
+ if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
+ !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+ intel_pstate_no_acpi_pss())
return true;
}
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index d4add8621944..9fa177206032 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -148,9 +148,9 @@ static void loongson2_cpu_wait(void)
u32 cpu_freq;
spin_lock_irqsave(&loongson2_wait_lock, flags);
- cpu_freq = LOONGSON_CHIPCFG0;
- LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
- LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
+ cpu_freq = LOONGSON_CHIPCFG(0);
+ LOONGSON_CHIPCFG(0) &= ~0x7; /* Put CPU into wait mode */
+ LOONGSON_CHIPCFG(0) = cpu_freq; /* Restore CPU state */
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
local_irq_enable();
}
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index c8012bc86910..f91027259c3c 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -55,6 +55,7 @@ static const struct {
unsigned freq;
unsigned mult;
} usual_frequency_table[] = {
+ { 350000, 35 }, // 100 * 3.5
{ 400000, 40 }, // 100 * 4
{ 450000, 45 }, // 100 * 4.5
{ 475000, 50 }, // 95 * 5
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index bb1d08dc8cc8..379c0837f5a9 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -28,6 +28,7 @@
#include <linux/of.h>
#include <asm/cputhreads.h>
+#include <asm/firmware.h>
#include <asm/reg.h>
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
@@ -98,7 +99,11 @@ static int init_powernv_pstates(void)
return -ENODEV;
}
- WARN_ON(len_ids != len_freqs);
+ if (len_ids != len_freqs) {
+ pr_warn("Entries in ibm,pstate-ids and "
+ "ibm,pstate-frequencies-mhz does not match\n");
+ }
+
nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
if (!nr_pstates) {
pr_warn("No PStates found\n");
@@ -131,7 +136,12 @@ static unsigned int pstate_id_to_freq(int pstate_id)
int i;
i = powernv_pstate_info.max - pstate_id;
- BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0);
+ if (i >= powernv_pstate_info.nr_pstates || i < 0) {
+ pr_warn("PState id %d outside of PState table, "
+ "reporting nominal id %d instead\n",
+ pstate_id, powernv_pstate_info.nominal);
+ i = powernv_pstate_info.max - powernv_pstate_info.nominal;
+ }
return powernv_freqs[i].frequency;
}
@@ -321,6 +331,10 @@ static int __init powernv_cpufreq_init(void)
{
int rc = 0;
+ /* Don't probe on pseries (guest) platforms */
+ if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ return -ENODEV;
+
/* Discover pstates from device tree and init */
rc = init_powernv_pstates();
if (rc) {
diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c
index cfa0dd8723ec..b8e5da8e188b 100644
--- a/drivers/cpufreq/s3c2410-cpufreq.c
+++ b/drivers/cpufreq/s3c2410-cpufreq.c
@@ -26,7 +26,6 @@
#include <mach/regs-clock.h>
#include <plat/cpu.h>
-#include <plat/clock.h>
#include <plat/cpu-freq-core.h>
/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
@@ -104,7 +103,6 @@ static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
.calc_iotiming = s3c2410_iotiming_calc,
.set_iotiming = s3c2410_iotiming_set,
.get_iotiming = s3c2410_iotiming_get,
- .resume_clocks = s3c2410_setup_clocks,
.set_fvco = s3c2410_set_fvco,
.set_refresh = s3c2410_cpufreq_setrefresh,
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
index 4645b4898996..eb262133fef2 100644
--- a/drivers/cpufreq/s3c2412-cpufreq.c
+++ b/drivers/cpufreq/s3c2412-cpufreq.c
@@ -28,7 +28,6 @@
#include <mach/s3c2412.h>
#include <plat/cpu.h>
-#include <plat/clock.h>
#include <plat/cpu-freq-core.h>
/* our clock resources. */
@@ -188,8 +187,6 @@ static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
.set_iotiming = s3c2412_iotiming_set,
.get_iotiming = s3c2412_iotiming_get,
- .resume_clocks = s3c2412_setup_clocks,
-
.debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
};
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index f84ed10755b5..0129f5c70a61 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -29,7 +29,6 @@
#include <plat/cpu.h>
#include <plat/cpu-freq-core.h>
-#include <plat/clock.h>
static struct clk *xtal;
static struct clk *fclk;
@@ -262,8 +261,6 @@ static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
.calc_divs = s3c2440_cpufreq_calcdivs,
.calc_freqtable = s3c2440_cpufreq_calctable,
- .resume_clocks = s3c244x_setup_clocks,
-
.debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
};
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 227ebf7c1eea..d00f1cee4509 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -27,7 +27,6 @@
#include <asm/mach/map.h>
#include <plat/cpu.h>
-#include <plat/clock.h>
#include <plat/cpu-freq-core.h>
#include <mach/regs-clock.h>
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 19a10b89fef7..9a68225a757e 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -16,11 +16,70 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regulator/consumer.h>
-#include <mach/map.h>
-#include <mach/regs-clock.h>
+static void __iomem *clk_base;
+static void __iomem *dmc_base[2];
+
+#define S5P_CLKREG(x) (clk_base + (x))
+
+#define S5P_APLL_LOCK S5P_CLKREG(0x00)
+#define S5P_APLL_CON S5P_CLKREG(0x100)
+#define S5P_CLK_SRC0 S5P_CLKREG(0x200)
+#define S5P_CLK_SRC2 S5P_CLKREG(0x208)
+#define S5P_CLK_DIV0 S5P_CLKREG(0x300)
+#define S5P_CLK_DIV2 S5P_CLKREG(0x308)
+#define S5P_CLK_DIV6 S5P_CLKREG(0x318)
+#define S5P_CLKDIV_STAT0 S5P_CLKREG(0x1000)
+#define S5P_CLKDIV_STAT1 S5P_CLKREG(0x1004)
+#define S5P_CLKMUX_STAT0 S5P_CLKREG(0x1100)
+#define S5P_CLKMUX_STAT1 S5P_CLKREG(0x1104)
+
+#define S5P_ARM_MCS_CON S5P_CLKREG(0x6100)
+
+/* CLKSRC0 */
+#define S5P_CLKSRC0_MUX200_SHIFT (16)
+#define S5P_CLKSRC0_MUX200_MASK (0x1 << S5P_CLKSRC0_MUX200_SHIFT)
+#define S5P_CLKSRC0_MUX166_MASK (0x1<<20)
+#define S5P_CLKSRC0_MUX133_MASK (0x1<<24)
+
+/* CLKSRC2 */
+#define S5P_CLKSRC2_G3D_SHIFT (0)
+#define S5P_CLKSRC2_G3D_MASK (0x3 << S5P_CLKSRC2_G3D_SHIFT)
+#define S5P_CLKSRC2_MFC_SHIFT (4)
+#define S5P_CLKSRC2_MFC_MASK (0x3 << S5P_CLKSRC2_MFC_SHIFT)
+
+/* CLKDIV0 */
+#define S5P_CLKDIV0_APLL_SHIFT (0)
+#define S5P_CLKDIV0_APLL_MASK (0x7 << S5P_CLKDIV0_APLL_SHIFT)
+#define S5P_CLKDIV0_A2M_SHIFT (4)
+#define S5P_CLKDIV0_A2M_MASK (0x7 << S5P_CLKDIV0_A2M_SHIFT)
+#define S5P_CLKDIV0_HCLK200_SHIFT (8)
+#define S5P_CLKDIV0_HCLK200_MASK (0x7 << S5P_CLKDIV0_HCLK200_SHIFT)
+#define S5P_CLKDIV0_PCLK100_SHIFT (12)
+#define S5P_CLKDIV0_PCLK100_MASK (0x7 << S5P_CLKDIV0_PCLK100_SHIFT)
+#define S5P_CLKDIV0_HCLK166_SHIFT (16)
+#define S5P_CLKDIV0_HCLK166_MASK (0xF << S5P_CLKDIV0_HCLK166_SHIFT)
+#define S5P_CLKDIV0_PCLK83_SHIFT (20)
+#define S5P_CLKDIV0_PCLK83_MASK (0x7 << S5P_CLKDIV0_PCLK83_SHIFT)
+#define S5P_CLKDIV0_HCLK133_SHIFT (24)
+#define S5P_CLKDIV0_HCLK133_MASK (0xF << S5P_CLKDIV0_HCLK133_SHIFT)
+#define S5P_CLKDIV0_PCLK66_SHIFT (28)
+#define S5P_CLKDIV0_PCLK66_MASK (0x7 << S5P_CLKDIV0_PCLK66_SHIFT)
+
+/* CLKDIV2 */
+#define S5P_CLKDIV2_G3D_SHIFT (0)
+#define S5P_CLKDIV2_G3D_MASK (0xF << S5P_CLKDIV2_G3D_SHIFT)
+#define S5P_CLKDIV2_MFC_SHIFT (4)
+#define S5P_CLKDIV2_MFC_MASK (0xF << S5P_CLKDIV2_MFC_SHIFT)
+
+/* CLKDIV6 */
+#define S5P_CLKDIV6_ONEDRAM_SHIFT (28)
+#define S5P_CLKDIV6_ONEDRAM_MASK (0xF << S5P_CLKDIV6_ONEDRAM_SHIFT)
static struct clk *dmc0_clk;
static struct clk *dmc1_clk;
@@ -142,9 +201,9 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
void __iomem *reg = NULL;
if (ch == DMC0) {
- reg = (S5P_VA_DMC0 + 0x30);
+ reg = (dmc_base[0] + 0x30);
} else if (ch == DMC1) {
- reg = (S5P_VA_DMC1 + 0x30);
+ reg = (dmc_base[1] + 0x30);
} else {
printk(KERN_ERR "Cannot find DMC port\n");
return;
@@ -472,7 +531,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
* check_mem_type : This driver only supports LPDDR & LPDDR2.
* Other memory types are not supported.
*/
- mem_type = check_mem_type(S5P_VA_DMC0);
+ mem_type = check_mem_type(dmc_base[0]);
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
@@ -481,10 +540,10 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
}
/* Find the current refresh counter and frequency for each DMC */
- s5pv210_dram_conf[0].refresh = (__raw_readl(S5P_VA_DMC0 + 0x30) * 1000);
+ s5pv210_dram_conf[0].refresh = (__raw_readl(dmc_base[0] + 0x30) * 1000);
s5pv210_dram_conf[0].freq = clk_get_rate(dmc0_clk);
- s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
+ s5pv210_dram_conf[1].refresh = (__raw_readl(dmc_base[1] + 0x30) * 1000);
s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
policy->suspend_freq = SLEEP_FREQ;
@@ -527,8 +586,55 @@ static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
.notifier_call = s5pv210_cpufreq_reboot_notifier_event,
};
-static int __init s5pv210_cpufreq_init(void)
+static int s5pv210_cpufreq_probe(struct platform_device *pdev)
{
+ struct device_node *np;
+ int id;
+
+ /*
+ * HACK: This is a temporary workaround to get access to clock
+ * and DMC controller registers directly and remove static mappings
+ * and dependencies on platform headers. It is necessary to enable
+ * S5PV210 multi-platform support and will be removed together with
+ * this whole driver as soon as S5PV210 gets migrated to use the
+ * cpufreq-cpu0 driver.
+ */
+ np = of_find_compatible_node(NULL, NULL, "samsung,s5pv210-clock");
+ if (!np) {
+ pr_err("%s: failed to find clock controller DT node\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ clk_base = of_iomap(np, 0);
+ if (!clk_base) {
+ pr_err("%s: failed to map clock registers\n", __func__);
+ return -EFAULT;
+ }
+
+ for_each_compatible_node(np, NULL, "samsung,s5pv210-dmc") {
+ id = of_alias_get_id(np, "dmc");
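+ /* The "dmc" alias gives the controller index used to fill dmc_base[] (dmc0/dmc1). */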
+ if (id < 0 || id >= ARRAY_SIZE(dmc_base)) {
+ pr_err("%s: failed to get alias of dmc node '%s'\n",
+ __func__, np->name);
+ return id;
+ }
+
+ dmc_base[id] = of_iomap(np, 0);
+ if (!dmc_base[id]) {
+ pr_err("%s: failed to map dmc%d registers\n",
+ __func__, id);
+ return -EFAULT;
+ }
+ }
+
+ for (id = 0; id < ARRAY_SIZE(dmc_base); ++id) {
+ if (!dmc_base[id]) {
+ pr_err("%s: failed to find dmc%d node\n", __func__, id);
+ return -ENODEV;
+ }
+ }
+
arm_regulator = regulator_get(NULL, "vddarm");
if (IS_ERR(arm_regulator)) {
pr_err("failed to get regulator vddarm");
@@ -547,4 +653,11 @@ static int __init s5pv210_cpufreq_init(void)
return cpufreq_register_driver(&s5pv210_driver);
}
-late_initcall(s5pv210_cpufreq_init);
+static struct platform_driver s5pv210_cpufreq_platdrv = {
+ .driver = {
+ .name = "s5pv210-cpufreq",
+ .owner = THIS_MODULE,
+ },
+ .probe = s5pv210_cpufreq_probe,
+};
+module_platform_driver(s5pv210_cpufreq_platdrv);
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 1b96fb91d32c..32748c36c477 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -15,12 +15,7 @@ config CPU_IDLE
if CPU_IDLE
config CPU_IDLE_MULTIPLE_DRIVERS
- bool "Support multiple cpuidle drivers"
- default n
- help
- Allows the cpuidle framework to use different drivers for each CPU.
- This is useful if you have a system with different CPU latencies and
- states. If unsure say N.
+ bool
config CPU_IDLE_GOV_LADDER
bool "Ladder governor (for periodic timer tick)"
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index b6d69e899f5d..38cff69ffe06 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -1,15 +1,10 @@
#
# ARM CPU Idle drivers
#
-config ARM_ARMADA_370_XP_CPUIDLE
- bool "CPU Idle Driver for Armada 370/XP family processors"
- depends on ARCH_MVEBU
- help
- Select this to enable cpuidle on Armada 370/XP processors.
-
config ARM_BIG_LITTLE_CPUIDLE
bool "Support for ARM big.LITTLE processors"
- depends on ARCH_VEXPRESS_TC2_PM
+ depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
+ depends on MCPM
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
help
@@ -61,3 +56,9 @@ config ARM_EXYNOS_CPUIDLE
depends on ARCH_EXYNOS
help
Select this to enable cpuidle for Exynos processors
+
+config ARM_MVEBU_V7_CPUIDLE
+ bool "CPU Idle Driver for mvebu v7 family processors"
+ depends on ARCH_MVEBU
+ help
+ Select this to enable cpuidle on Armada 370, 38x and XP processors.
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index d8bb1ff72561..11edb31c55e9 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
##################################################################################
# ARM SoC drivers
-obj-$(CONFIG_ARM_ARMADA_370_XP_CPUIDLE) += cpuidle-armada-370-xp.o
+obj-$(CONFIG_ARM_MVEBU_V7_CPUIDLE) += cpuidle-mvebu-v7.o
obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o
obj-$(CONFIG_ARM_CLPS711X_CPUIDLE) += cpuidle-clps711x.o
obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
diff --git a/drivers/cpuidle/cpuidle-armada-370-xp.c b/drivers/cpuidle/cpuidle-armada-370-xp.c
deleted file mode 100644
index a5fba0287bfb..000000000000
--- a/drivers/cpuidle/cpuidle-armada-370-xp.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Marvell Armada 370 and Armada XP SoC cpuidle driver
- *
- * Copyright (C) 2014 Marvell
- *
- * Nadav Haklai <nadavh@marvell.com>
- * Gregory CLEMENT <gregory.clement@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- *
- * Maintainer: Gregory CLEMENT <gregory.clement@free-electrons.com>
- */
-
-#include <linux/cpu_pm.h>
-#include <linux/cpuidle.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/suspend.h>
-#include <linux/platform_device.h>
-#include <asm/cpuidle.h>
-
-#define ARMADA_370_XP_MAX_STATES 3
-#define ARMADA_370_XP_FLAG_DEEP_IDLE 0x10000
-
-static int (*armada_370_xp_cpu_suspend)(int);
-
-static int armada_370_xp_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- int ret;
- bool deepidle = false;
- cpu_pm_enter();
-
- if (drv->states[index].flags & ARMADA_370_XP_FLAG_DEEP_IDLE)
- deepidle = true;
-
- ret = armada_370_xp_cpu_suspend(deepidle);
- if (ret)
- return ret;
-
- cpu_pm_exit();
-
- return index;
-}
-
-static struct cpuidle_driver armada_370_xp_idle_driver = {
- .name = "armada_370_xp_idle",
- .states[0] = ARM_CPUIDLE_WFI_STATE,
- .states[1] = {
- .enter = armada_370_xp_enter_idle,
- .exit_latency = 10,
- .power_usage = 50,
- .target_residency = 100,
- .flags = CPUIDLE_FLAG_TIME_VALID,
- .name = "Idle",
- .desc = "CPU power down",
- },
- .states[2] = {
- .enter = armada_370_xp_enter_idle,
- .exit_latency = 100,
- .power_usage = 5,
- .target_residency = 1000,
- .flags = CPUIDLE_FLAG_TIME_VALID |
- ARMADA_370_XP_FLAG_DEEP_IDLE,
- .name = "Deep idle",
- .desc = "CPU and L2 Fabric power down",
- },
- .state_count = ARMADA_370_XP_MAX_STATES,
-};
-
-static int armada_370_xp_cpuidle_probe(struct platform_device *pdev)
-{
-
- armada_370_xp_cpu_suspend = (void *)(pdev->dev.platform_data);
- return cpuidle_register(&armada_370_xp_idle_driver, NULL);
-}
-
-static struct platform_driver armada_370_xp_cpuidle_plat_driver = {
- .driver = {
- .name = "cpuidle-armada-370-xp",
- .owner = THIS_MODULE,
- },
- .probe = armada_370_xp_cpuidle_probe,
-};
-
-module_platform_driver(armada_370_xp_cpuidle_plat_driver);
-
-MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
-MODULE_DESCRIPTION("Armada 370/XP cpu idle driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index b45fc6249041..344d79fa3407 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -163,14 +163,24 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int cpu_id)
return 0;
}
+static const struct of_device_id compatible_machine_match[] = {
+ { .compatible = "arm,vexpress,v2p-ca15_a7" },
+ { .compatible = "samsung,exynos5420" },
+ {},
+};
+
static int __init bl_idle_init(void)
{
int ret;
+ struct device_node *root = of_find_node_by_path("/");
+
+ if (!root)
+ return -ENODEV;
/*
* Initialize the driver just for a compliant set of machines
*/
- if (!of_machine_is_compatible("arm,vexpress,v2p-ca15_a7"))
+ if (!of_match_node(compatible_machine_match, root))
return -ENODEV;
/*
* For now the differentiation between little and big cores
diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c
index 7c0151263828..ba9b34b579f3 100644
--- a/drivers/cpuidle/cpuidle-exynos.c
+++ b/drivers/cpuidle/cpuidle-exynos.c
@@ -20,25 +20,6 @@
static void (*exynos_enter_aftr)(void);
-static int idle_finisher(unsigned long flags)
-{
- exynos_enter_aftr();
- cpu_do_idle();
-
- return 1;
-}
-
-static int exynos_enter_core0_aftr(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
-{
- cpu_pm_enter();
- cpu_suspend(0, idle_finisher);
- cpu_pm_exit();
-
- return index;
-}
-
static int exynos_enter_lowpower(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
@@ -51,8 +32,10 @@ static int exynos_enter_lowpower(struct cpuidle_device *dev,
if (new_index == 0)
return arm_cpuidle_simple_enter(dev, drv, new_index);
- else
- return exynos_enter_core0_aftr(dev, drv, new_index);
+
+ exynos_enter_aftr();
+
+ return new_index;
}
static struct cpuidle_driver exynos_idle_driver = {
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
new file mode 100644
index 000000000000..45371bb16214
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -0,0 +1,150 @@
+/*
+ * Marvell Armada 370, 38x and XP SoC cpuidle driver
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Maintainer: Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/cpuidle.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
+#include <asm/cpuidle.h>
+
+#define MVEBU_V7_FLAG_DEEP_IDLE 0x10000
+
+static int (*mvebu_v7_cpu_suspend)(int);
+
+static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ int ret;
+ bool deepidle = false;
+ cpu_pm_enter();
+
+ if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE)
+ deepidle = true;
+
+ ret = mvebu_v7_cpu_suspend(deepidle);
+ if (ret)
+ return ret;
+
+ cpu_pm_exit();
+
+ return index;
+}
+
+static struct cpuidle_driver armadaxp_idle_driver = {
+ .name = "armada_xp_idle",
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = mvebu_v7_enter_idle,
+ .exit_latency = 10,
+ .power_usage = 50,
+ .target_residency = 100,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "MV CPU IDLE",
+ .desc = "CPU power down",
+ },
+ .states[2] = {
+ .enter = mvebu_v7_enter_idle,
+ .exit_latency = 100,
+ .power_usage = 5,
+ .target_residency = 1000,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ MVEBU_V7_FLAG_DEEP_IDLE,
+ .name = "MV CPU DEEP IDLE",
+ .desc = "CPU and L2 Fabric power down",
+ },
+ .state_count = 3,
+};
+
+static struct cpuidle_driver armada370_idle_driver = {
+ .name = "armada_370_idle",
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = mvebu_v7_enter_idle,
+ .exit_latency = 100,
+ .power_usage = 5,
+ .target_residency = 1000,
+ .flags = (CPUIDLE_FLAG_TIME_VALID |
+ MVEBU_V7_FLAG_DEEP_IDLE),
+ .name = "Deep Idle",
+ .desc = "CPU and L2 Fabric power down",
+ },
+ .state_count = 2,
+};
+
+static struct cpuidle_driver armada38x_idle_driver = {
+ .name = "armada_38x_idle",
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .states[1] = {
+ .enter = mvebu_v7_enter_idle,
+ .exit_latency = 10,
+ .power_usage = 5,
+ .target_residency = 100,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "Idle",
+ .desc = "CPU and SCU power down",
+ },
+ .state_count = 2,
+};
+
+static int mvebu_v7_cpuidle_probe(struct platform_device *pdev)
+{
+ mvebu_v7_cpu_suspend = pdev->dev.platform_data;
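+ /* The platform code supplies the SoC-specific low-level suspend routine via platform_data. */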
+
+ if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-xp"))
+ return cpuidle_register(&armadaxp_idle_driver, NULL);
+ else if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-370"))
+ return cpuidle_register(&armada370_idle_driver, NULL);
+ else if (!strcmp(pdev->dev.driver->name, "cpuidle-armada-38x"))
+ return cpuidle_register(&armada38x_idle_driver, NULL);
+ else
+ return -EINVAL;
+}
+
+static struct platform_driver armadaxp_cpuidle_plat_driver = {
+ .driver = {
+ .name = "cpuidle-armada-xp",
+ .owner = THIS_MODULE,
+ },
+ .probe = mvebu_v7_cpuidle_probe,
+};
+
+module_platform_driver(armadaxp_cpuidle_plat_driver);
+
+static struct platform_driver armada370_cpuidle_plat_driver = {
+ .driver = {
+ .name = "cpuidle-armada-370",
+ .owner = THIS_MODULE,
+ },
+ .probe = mvebu_v7_cpuidle_probe,
+};
+
+module_platform_driver(armada370_cpuidle_plat_driver);
+
+static struct platform_driver armada38x_cpuidle_plat_driver = {
+ .driver = {
+ .name = "cpuidle-armada-38x",
+ .owner = THIS_MODULE,
+ },
+ .probe = mvebu_v7_cpuidle_probe,
+};
+
+module_platform_driver(armada38x_cpuidle_plat_driver);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell EBU v7 cpuidle driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index 74f5788d50b1..a64be578dab2 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -160,10 +160,10 @@ static int powernv_cpuidle_driver_init(void)
static int powernv_add_idle_states(void)
{
struct device_node *power_mgt;
- struct property *prop;
int nr_idle_states = 1; /* Snooze */
int dt_idle_states;
- u32 *flags;
+ const __be32 *idle_state_flags;
+ u32 len_flags, flags;
int i;
/* Currently we have snooze statically defined */
@@ -174,18 +174,18 @@ static int powernv_add_idle_states(void)
return nr_idle_states;
}
- prop = of_find_property(power_mgt, "ibm,cpu-idle-state-flags", NULL);
- if (!prop) {
+ idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags);
+ if (!idle_state_flags) {
pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
return nr_idle_states;
}
- dt_idle_states = prop->length / sizeof(u32);
- flags = (u32 *) prop->value;
+ dt_idle_states = len_flags / sizeof(u32);
for (i = 0; i < dt_idle_states; i++) {
- if (flags[i] & IDLE_USE_INST_NAP) {
+ flags = be32_to_cpu(idle_state_flags[i]);
+ if (flags & IDLE_USE_INST_NAP) {
/* Add NAP state */
strcpy(powernv_states[nr_idle_states].name, "Nap");
strcpy(powernv_states[nr_idle_states].desc, "Nap");
@@ -196,7 +196,7 @@ static int powernv_add_idle_states(void)
nr_idle_states++;
}
- if (flags[i] & IDLE_USE_INST_SLEEP) {
+ if (flags & IDLE_USE_INST_SLEEP) {
/* Add FASTSLEEP state */
strcpy(powernv_states[nr_idle_states].name, "FastSleep");
strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index cb7019977c50..ee9df5e3f5eb 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -119,11 +119,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
ktime_t time_start, time_end;
s64 diff;
+ trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ktime_get();
entered_state = target_state->enter(dev, drv, index);
time_end = ktime_get();
+ trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
if (!cpuidle_state_is_coupled(dev, drv, entered_state))
local_irq_enable();
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 9634f20e3926..e431d11abf8d 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -182,10 +182,6 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
static int poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- ktime_t t1, t2;
- s64 diff;
-
- t1 = ktime_get();
local_irq_enable();
if (!current_set_polling_and_test()) {
while (!need_resched())
@@ -193,13 +189,6 @@ static int poll_idle(struct cpuidle_device *dev,
}
current_clr_polling();
- t2 = ktime_get();
- diff = ktime_to_us(ktime_sub(t2, t1));
- if (diff > INT_MAX)
- diff = INT_MAX;
-
- dev->last_residency = (int) diff;
-
return index;
}
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 9f08e8cce1af..044ee0df5871 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -144,7 +144,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
- for (i = 0; i < drv->state_count; i++) {
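+ /* Start at CPUIDLE_DRIVER_STATE_START so the poll state (index 0, when present) is left out of the ladder. */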
+ for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
state = &drv->states[i];
lstate = &ldev->states[i];
@@ -156,7 +156,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
if (i < drv->state_count - 1)
lstate->threshold.promotion_time = state->exit_latency;
- if (i > 0)
+ if (i > CPUIDLE_DRIVER_STATE_START)
lstate->threshold.demotion_time = state->exit_latency;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index c4f80c15a48d..ae5a42595e1c 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -35,7 +35,6 @@
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
-#define STDDEV_THRESH 400
/*
@@ -399,7 +398,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
*
* Any measured amount of time will include the exit latency.
* Since we are interested in when the wakeup begun, not when it
- * was completed, we must substract the exit latency. However, if
+ * was completed, we must subtract the exit latency. However, if
* the measured amount of time is less than the exit latency,
* assume the state was never reached and the exit latency is 0.
*/
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index efe2f175168f..97c5903b4606 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -445,7 +445,7 @@ static void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
#define define_one_driver_ro(_name, show) \
static struct cpuidle_driver_attr attr_driver_##_name = \
- __ATTR(_name, 0644, show, NULL)
+ __ATTR(_name, 0444, show, NULL)
struct cpuidle_driver_kobj {
struct cpuidle_driver *drv;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 12fea3e22348..8d2a7728434d 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2617,14 +2617,13 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
- dev->desc_virt = pci_alloc_consistent(pdev, sizeof(struct hifn_dma),
- &dev->desc_dma);
+ dev->desc_virt = pci_zalloc_consistent(pdev, sizeof(struct hifn_dma),
+ &dev->desc_dma);
if (!dev->desc_virt) {
dprintk("Failed to allocate descriptor rings.\n");
err = -ENOMEM;
goto err_out_unmap_bars;
}
- memset(dev->desc_virt, 0, sizeof(struct hifn_dma));
dev->pdev = pdev;
dev->irq = pdev->irq;
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 49e74c1fc639..3dced0a9eae3 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -68,7 +68,6 @@ comment "DEVFREQ Drivers"
config ARM_EXYNOS4_BUS_DEVFREQ
bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
depends on (CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
- select ARCH_HAS_OPP
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_OPP
help
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 8f6afbf9ba54..9b1ea0ef59af 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -393,6 +393,22 @@ config XILINX_VDMA
channels, Memory Mapped to Stream (MM2S) and Stream to
Memory Mapped (S2MM) for the data transfers.
+config DMA_SUN6I
+ tristate "Allwinner A31 SoCs DMA support"
+ depends on MACH_SUN6I || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support for the DMA engine for Allwinner A31 SoCs.
+
+config NBPFAXI_DMA
+ tristate "Renesas Type-AXI NBPF DMA support"
+ select DMA_ENGINE
+ depends on ARM || COMPILE_TEST
+ help
+ Support for "Type-AXI" NBPF DMA IPs from Renesas
+
config DMA_ENGINE
bool
@@ -406,6 +422,7 @@ config DMA_ACPI
config DMA_OF
def_bool y
depends on OF
+ select DMA_ENGINE
comment "DMA Clients"
depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index bd9e7fa928bd..c6adb925f0b9 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,5 +1,5 @@
-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
+subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
+subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
@@ -48,3 +48,5 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
obj-y += xilinx/
obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
+obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
+obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index 734ed0206cd5..b8045cd42ee1 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -7,7 +7,6 @@ TODO for slave dma
- imx-dma
- imx-sdma
- mxs-dma.c
- - dw_dmac
- intel_mid_dma
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 8114731a1c62..e34024b000a4 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1040,7 +1040,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
if (early_bytes) {
dev_vdbg(&pl08x->adev->dev,
- "%s byte width LLIs (remain 0x%08x)\n",
+ "%s byte width LLIs (remain 0x%08zx)\n",
__func__, bd.remainder);
prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
num_llis++, &total_bytes);
@@ -1653,7 +1653,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
@@ -1662,7 +1662,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
dma_addr_t slave_addr;
dev_dbg(&pl08x->adev->dev,
- "%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+ "%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
__func__, period_len, buf_len,
direction == DMA_MEM_TO_DEV ? "to" : "from",
plchan->name);
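Two recurring themes of this series show up in the pl08x hunks: size_t values are now printed with the %z length modifier, and device_prep_dma_cyclic() callbacks drop their unused void *context argument. A minimal sketch of a callback using the new prototype; the name and (empty) body are hypothetical.

#include <linux/dmaengine.h>
#include <linux/printk.h>

static struct dma_async_tx_descriptor *example_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	/* size_t takes %zu/%zx, not %d/%x, on both 32- and 64-bit. */
	pr_debug("cyclic: %zu bytes in %zu byte periods\n",
		 buf_len, period_len);
	return NULL;			/* sketch only, no descriptor built */
}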
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c13a3bb0f594..ca9dd2613283 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -294,14 +294,16 @@ static int atc_get_bytes_left(struct dma_chan *chan)
ret = -EINVAL;
goto out;
}
- atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
- << (desc_first->tx_width);
- if (atchan->remain_desc < 0) {
+
+ count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
+ << desc_first->tx_width;
+ if (atchan->remain_desc < count) {
ret = -EINVAL;
goto out;
- } else {
- ret = atchan->remain_desc;
}
+
+ atchan->remain_desc -= count;
+ ret = atchan->remain_desc;
} else {
/*
* Get residual bytes when current
@@ -893,12 +895,11 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
* @period_len: number of bytes for each period
* @direction: transfer direction, to or from device
* @flags: tx descriptor status flags
- * @context: transfer context (ignored)
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index a03602164e3e..68007974961a 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -335,7 +335,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
enum dma_slave_buswidth dev_width;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 94c380f07538..6a9d89c93b1f 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -433,7 +433,7 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
struct jz4740_dma_desc *desc;
@@ -614,4 +614,4 @@ module_platform_driver(jz4740_dma_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("JZ4740 DMA driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index a27ded53ab4f..1af731b83b3f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -279,6 +279,19 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
channel_set_bit(dw, CH_EN, dwc->mask);
}
+static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
+{
+ struct dw_desc *desc;
+
+ if (list_empty(&dwc->queue))
+ return;
+
+ list_move(dwc->queue.next, &dwc->active_list);
+ desc = dwc_first_active(dwc);
+ dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
+ dwc_dostart(dwc, desc);
+}
+
/*----------------------------------------------------------------------*/
static void
@@ -335,10 +348,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
* the completed ones.
*/
list_splice_init(&dwc->active_list, &list);
- if (!list_empty(&dwc->queue)) {
- list_move(dwc->queue.next, &dwc->active_list);
- dwc_dostart(dwc, dwc_first_active(dwc));
- }
+ dwc_dostart_first_queued(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -467,10 +477,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
/* Try to continue after resetting the channel... */
dwc_chan_disable(dw, dwc);
- if (!list_empty(&dwc->queue)) {
- list_move(dwc->queue.next, &dwc->active_list);
- dwc_dostart(dwc, dwc_first_active(dwc));
- }
+ dwc_dostart_first_queued(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
}
@@ -677,17 +684,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
* possible, perhaps even appending to those already submitted
* for DMA. But this is hard to do in a race-free manner.
*/
- if (list_empty(&dwc->active_list)) {
- dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &dwc->active_list);
- dwc_dostart(dwc, dwc_first_active(dwc));
- } else {
- dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &dwc->queue);
- }
+ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie);
+ list_add_tail(&desc->desc_node, &dwc->queue);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1092,9 +1091,12 @@ dwc_tx_status(struct dma_chan *chan,
static void dwc_issue_pending(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ unsigned long flags;
- if (!list_empty(&dwc->queue))
- dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
+ spin_lock_irqsave(&dwc->lock, flags);
+ if (list_empty(&dwc->active_list))
+ dwc_dostart_first_queued(dwc);
+ spin_unlock_irqrestore(&dwc->lock, flags);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
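The dw_dmac rework above stops starting transfers from tx_submit(): descriptors are only queued there, and issue_pending() starts the first queued descriptor once the active list is empty. A minimal sketch of that split with hypothetical types and helpers (exchan, ex_start_hw); the real driver also handles cookies, callbacks and error paths.

#include <linux/list.h>
#include <linux/spinlock.h>

struct exchan {
	spinlock_t		lock;
	struct list_head	queue;		/* submitted, not started */
	struct list_head	active;		/* owned by the hardware */
};

static void ex_start_first_queued(struct exchan *c)
{
	if (list_empty(&c->queue))
		return;
	/* Hand the oldest queued descriptor to the hardware. */
	list_move(c->queue.next, &c->active);
	/* ex_start_hw(c, list_first_entry(&c->active, ...)); */
}

static void ex_issue_pending(struct exchan *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	/* Only kick the hardware if it is idle; completion handling
	 * restarts the queue otherwise. */
	if (list_empty(&c->active))
		ex_start_first_queued(c);
	spin_unlock_irqrestore(&c->lock, flags);
}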
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d08c4dedef35..7b65633f495e 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/of.h>
#include <linux/platform_data/edma.h>
@@ -256,8 +257,13 @@ static int edma_terminate_all(struct edma_chan *echan)
* echan->edesc is NULL and exit.)
*/
if (echan->edesc) {
+ int cyclic = echan->edesc->cyclic;
echan->edesc = NULL;
edma_stop(echan->ch_num);
+ /* Move the cyclic channel back to default queue */
+ if (cyclic)
+ edma_assign_channel_eventq(echan->ch_num,
+ EVENTQ_DEFAULT);
}
vchan_get_all_descriptors(&echan->vchan, &head);
@@ -592,7 +598,7 @@ struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long tx_flags, void *context)
+ unsigned long tx_flags)
{
struct edma_chan *echan = to_edma_chan(chan);
struct device *dev = chan->device->dev;
@@ -718,12 +724,15 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
edesc->absync = ret;
/*
- * Enable interrupts for every period because callback
- * has to be called for every period.
+ * Enable period interrupt only if it is requested
*/
- edesc->pset[i].param.opt |= TCINTEN;
+ if (tx_flags & DMA_PREP_INTERRUPT)
+ edesc->pset[i].param.opt |= TCINTEN;
}
+ /* Place the cyclic channel to highest priority queue */
+ edma_assign_channel_eventq(echan->ch_num, EVENTQ_0);
+
return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
@@ -982,6 +991,7 @@ static void __init edma_chan_init(struct edma_cc *ecc,
#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static int edma_dma_device_slave_caps(struct dma_chan *dchan,
@@ -992,7 +1002,7 @@ static int edma_dma_device_slave_caps(struct dma_chan *dchan,
caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
caps->cmd_pause = true;
caps->cmd_terminate = true;
- caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
return 0;
}
@@ -1039,7 +1049,7 @@ static int edma_probe(struct platform_device *pdev)
ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
if (ecc->dummy_slot < 0) {
dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
- return -EIO;
+ return ecc->dummy_slot;
}
dma_cap_zero(ecc->dma_slave.cap_mask);
@@ -1124,7 +1134,7 @@ static int edma_init(void)
}
}
- if (EDMA_CTLRS == 2) {
+ if (!of_have_populated_dt() && EDMA_CTLRS == 2) {
pdev1 = platform_device_register_full(&edma_dev_info1);
if (IS_ERR(pdev1)) {
platform_driver_unregister(&edma_driver);
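Among the edma changes above, the per-period completion interrupt is now only enabled when the client asked for it via DMA_PREP_INTERRUPT, which avoids a flood of interrupts for audio-style cyclic transfers. A minimal sketch of the flag check; EX_TCINTEN and param_opt are hypothetical stand-ins for the driver's transfer-complete bit and parameter word.

#include <linux/bitops.h>
#include <linux/dmaengine.h>

#define EX_TCINTEN	BIT(20)		/* hypothetical "interrupt on TC" bit */

static void ex_fill_period(u32 *param_opt, unsigned long tx_flags)
{
	/* Raise a completion interrupt per period only when callbacks
	 * were requested; otherwise leave the bit clear. */
	if (tx_flags & DMA_PREP_INTERRUPT)
		*param_opt |= EX_TCINTEN;
}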
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index cb4bf682a708..7650470196c4 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1092,7 +1092,6 @@ fail:
* @period_len: length of a single period
* @dir: direction of the operation
* @flags: tx descriptor status flags
- * @context: operation context (ignored)
*
* Prepares a descriptor for cyclic DMA operation. This means that once the
* descriptor is submitted, we will be submitting in a @period_len sized
@@ -1105,8 +1104,7 @@ fail:
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction dir, unsigned long flags,
- void *context)
+ enum dma_transfer_direction dir, unsigned long flags)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index b396a7fb53ab..3c5711d5fe97 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -248,11 +248,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable)
{
u32 ch = fsl_chan->vchan.chan.chan_id;
- void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR];
+ void __iomem *muxaddr;
unsigned chans_per_mux, ch_off;
chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+ muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
if (enable)
edma_writeb(fsl_chan->edma,
@@ -516,7 +517,7 @@ err:
static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
@@ -724,6 +725,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
{
struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
struct dma_chan *chan, *_chan;
+ unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
if (dma_spec->args_count != 2)
return NULL;
@@ -732,7 +734,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
if (chan->client_count)
continue;
- if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) {
+ if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) {
chan = dma_get_slave_channel(chan);
if (chan) {
chan->device->privatecnt++;
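The fsl-edma fix above corrects the mux lookup: the mux index is the channel number divided by the number of channels per mux, not by the number of muxes. A small worked example, assuming 32 channels behind DMAMUX_NR = 2 muxes (so 16 channels per mux): channel 20 sits behind mux 20 / 16 = 1, while the old 20 / 2 = 10 indexes past the two-entry muxbase[] array.

/* Sketch of the corrected index computation (hypothetical helper). */
static unsigned int ex_mux_index(unsigned int chan_id,
				 unsigned int n_chans, unsigned int n_muxes)
{
	unsigned int chans_per_mux = n_chans / n_muxes;

	return chan_id / chans_per_mux;		/* e.g. 20 / 16 = 1 */
}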
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index e0fec68aed25..d5d6885ab341 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -396,10 +396,17 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct fsldma_chan *chan = to_fsl_chan(tx->chan);
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
- unsigned long flags;
dma_cookie_t cookie = -EINVAL;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
+
+#ifdef CONFIG_PM
+ if (unlikely(chan->pm_state != RUNNING)) {
+ chan_dbg(chan, "cannot submit due to suspend\n");
+ spin_unlock_bh(&chan->desc_lock);
+ return -1;
+ }
+#endif
/*
* assign cookies to all of the software descriptors
@@ -412,7 +419,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
/* put this transaction onto the tail of the pending queue */
append_ld_queue(chan, desc);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return cookie;
}
@@ -459,6 +466,88 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
}
/**
+ * fsldma_clean_completed_descriptor - free all descriptors which
+ * have been completed and acked
+ * @chan: Freescale DMA channel
+ *
+ * This function is used on all completed and acked descriptors.
+ * All descriptors should only be freed in this function.
+ */
+static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
+{
+ struct fsl_desc_sw *desc, *_desc;
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
+ if (async_tx_test_ack(&desc->async_tx))
+ fsl_dma_free_descriptor(chan, desc);
+}
+
+/**
+ * fsldma_run_tx_complete_actions - cleanup a single link descriptor
+ * @chan: Freescale DMA channel
+ * @desc: descriptor to cleanup and free
+ * @cookie: Freescale DMA transaction identifier
+ *
+ * This function is used on a descriptor which has been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies.
+ */
+static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc, dma_cookie_t cookie)
+{
+ struct dma_async_tx_descriptor *txd = &desc->async_tx;
+ dma_cookie_t ret = cookie;
+
+ BUG_ON(txd->cookie < 0);
+
+ if (txd->cookie > 0) {
+ ret = txd->cookie;
+
+ /* Run the link descriptor callback function */
+ if (txd->callback) {
+ chan_dbg(chan, "LD %p callback\n", desc);
+ txd->callback(txd->callback_param);
+ }
+ }
+
+ /* Run any dependencies */
+ dma_run_dependencies(txd);
+
+ return ret;
+}
+
+/**
+ * fsldma_clean_running_descriptor - move the completed descriptor from
+ * ld_running to ld_completed
+ * @chan: Freescale DMA channel
+ * @desc: the descriptor which is completed
+ *
+ * Free the descriptor directly if acked by async_tx api, or move it to
+ * queue ld_completed.
+ */
+static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ /* Remove from the list of transactions */
+ list_del(&desc->node);
+
+ /*
+ * the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx)) {
+ /*
+ * Move this descriptor to the list of descriptors which are
+ * completed, but still awaiting the 'ack' bit to be set.
+ */
+ list_add_tail(&desc->node, &chan->ld_completed);
+ return;
+ }
+
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+}
+
+/**
* fsl_chan_xfer_ld_queue - transfer any pending transactions
* @chan : Freescale DMA channel
*
@@ -526,31 +615,58 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
}
/**
- * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
+ * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
+ * and move them to ld_completed, to be freed once flag 'ack' is set
* @chan: Freescale DMA channel
- * @desc: descriptor to cleanup and free
*
- * This function is used on a descriptor which has been executed by the DMA
- * controller. It will run any callbacks, submit any dependencies, and then
- * free the descriptor.
+ * This function is used on descriptors which have been executed by the DMA
+ * controller. It will run any callbacks, submit any dependencies, then
+ * free these descriptors if flag 'ack' is set.
*/
-static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
+static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
- struct dma_async_tx_descriptor *txd = &desc->async_tx;
+ struct fsl_desc_sw *desc, *_desc;
+ dma_cookie_t cookie = 0;
+ dma_addr_t curr_phys = get_cdar(chan);
+ int seen_current = 0;
+
+ fsldma_clean_completed_descriptor(chan);
+
+ /* Run the callback for each descriptor, in order */
+ list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
+ /*
+ * do not advance past the current descriptor loaded into the
+ * hardware channel, subsequent descriptors are either in
+ * process or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /*
+ * stop the search if we reach the current descriptor and the
+ * channel is busy
+ */
+ if (desc->async_tx.phys == curr_phys) {
+ seen_current = 1;
+ if (!dma_is_idle(chan))
+ break;
+ }
+
+ cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);
- /* Run the link descriptor callback function */
- if (txd->callback) {
- chan_dbg(chan, "LD %p callback\n", desc);
- txd->callback(txd->callback_param);
+ fsldma_clean_running_descriptor(chan, desc);
}
- /* Run any dependencies */
- dma_run_dependencies(txd);
+ /*
+ * Start any pending transactions automatically
+ *
+ * In the ideal case, we keep the DMA controller busy while we go
+ * ahead and free the descriptors below.
+ */
+ fsl_chan_xfer_ld_queue(chan);
- dma_descriptor_unmap(txd);
- chan_dbg(chan, "LD %p free\n", desc);
- dma_pool_free(chan->desc_pool, desc, txd->phys);
+ if (cookie > 0)
+ chan->common.completed_cookie = cookie;
}
/**
@@ -617,13 +733,14 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
chan_dbg(chan, "free all channel resources\n");
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
+ fsldma_cleanup_descriptors(chan);
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ fsldma_free_desc_list(chan, &chan->ld_completed);
+ spin_unlock_bh(&chan->desc_lock);
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
@@ -842,7 +959,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
{
struct dma_slave_config *config;
struct fsldma_chan *chan;
- unsigned long flags;
int size;
if (!dchan)
@@ -852,7 +968,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
switch (cmd) {
case DMA_TERMINATE_ALL:
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
/* Halt the DMA engine */
dma_halt(chan);
@@ -860,9 +976,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
/* Remove and free all of the descriptors in the LD queue */
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
+ fsldma_free_desc_list(chan, &chan->ld_completed);
chan->idle = true;
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
return 0;
case DMA_SLAVE_CONFIG:
@@ -904,11 +1021,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
- unsigned long flags;
- spin_lock_irqsave(&chan->desc_lock, flags);
+ spin_lock_bh(&chan->desc_lock);
fsl_chan_xfer_ld_queue(chan);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
+ spin_unlock_bh(&chan->desc_lock);
}
/**
@@ -919,6 +1035,17 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(dchan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_bh(&chan->desc_lock);
+ fsldma_cleanup_descriptors(chan);
+ spin_unlock_bh(&chan->desc_lock);
+
return dma_cookie_status(dchan, cookie, txstate);
}
@@ -996,52 +1123,18 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
static void dma_do_tasklet(unsigned long data)
{
struct fsldma_chan *chan = (struct fsldma_chan *)data;
- struct fsl_desc_sw *desc, *_desc;
- LIST_HEAD(ld_cleanup);
- unsigned long flags;
chan_dbg(chan, "tasklet entry\n");
- spin_lock_irqsave(&chan->desc_lock, flags);
-
- /* update the cookie if we have some descriptors to cleanup */
- if (!list_empty(&chan->ld_running)) {
- dma_cookie_t cookie;
-
- desc = to_fsl_desc(chan->ld_running.prev);
- cookie = desc->async_tx.cookie;
- dma_cookie_complete(&desc->async_tx);
-
- chan_dbg(chan, "completed_cookie=%d\n", cookie);
- }
-
- /*
- * move the descriptors to a temporary list so we can drop the lock
- * during the entire cleanup operation
- */
- list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+ spin_lock_bh(&chan->desc_lock);
/* the hardware is now idle and ready for more */
chan->idle = true;
- /*
- * Start any pending transactions automatically
- *
- * In the ideal case, we keep the DMA controller busy while we go
- * ahead and free the descriptors below.
- */
- fsl_chan_xfer_ld_queue(chan);
- spin_unlock_irqrestore(&chan->desc_lock, flags);
-
- /* Run the callback for each descriptor, in order */
- list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
-
- /* Remove from the list of transactions */
- list_del(&desc->node);
+ /* Run all cleanup for descriptors which have been completed */
+ fsldma_cleanup_descriptors(chan);
- /* Run all cleanup for this descriptor */
- fsldma_cleanup_descriptor(chan, desc);
- }
+ spin_unlock_bh(&chan->desc_lock);
chan_dbg(chan, "tasklet exit\n");
}
@@ -1225,7 +1318,11 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
spin_lock_init(&chan->desc_lock);
INIT_LIST_HEAD(&chan->ld_pending);
INIT_LIST_HEAD(&chan->ld_running);
+ INIT_LIST_HEAD(&chan->ld_completed);
chan->idle = true;
+#ifdef CONFIG_PM
+ chan->pm_state = RUNNING;
+#endif
chan->common.device = &fdev->common;
dma_cookie_init(&chan->common);
@@ -1365,6 +1462,69 @@ static int fsldma_of_remove(struct platform_device *op)
return 0;
}
+#ifdef CONFIG_PM
+static int fsldma_suspend_late(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_chan *chan;
+ int i;
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ spin_lock_bh(&chan->desc_lock);
+ if (unlikely(!chan->idle))
+ goto out;
+ chan->regs_save.mr = get_mr(chan);
+ chan->pm_state = SUSPENDED;
+ spin_unlock_bh(&chan->desc_lock);
+ }
+ return 0;
+
+out:
+ for (; i >= 0; i--) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+ chan->pm_state = RUNNING;
+ spin_unlock_bh(&chan->desc_lock);
+ }
+ return -EBUSY;
+}
+
+static int fsldma_resume_early(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fsldma_device *fdev = platform_get_drvdata(pdev);
+ struct fsldma_chan *chan;
+ u32 mode;
+ int i;
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ spin_lock_bh(&chan->desc_lock);
+ mode = chan->regs_save.mr
+ & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
+ set_mr(chan, mode);
+ chan->pm_state = RUNNING;
+ spin_unlock_bh(&chan->desc_lock);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops fsldma_pm_ops = {
+ .suspend_late = fsldma_suspend_late,
+ .resume_early = fsldma_resume_early,
+};
+#endif
+
static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,elo3-dma", },
{ .compatible = "fsl,eloplus-dma", },
@@ -1377,6 +1537,9 @@ static struct platform_driver fsldma_of_driver = {
.name = "fsl-elo-dma",
.owner = THIS_MODULE,
.of_match_table = fsldma_of_ids,
+#ifdef CONFIG_PM
+ .pm = &fsldma_pm_ops,
+#endif
},
.probe = fsldma_of_probe,
.remove = fsldma_of_remove,
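The fsldma additions above hook the driver into the suspend_late / resume_early phases of the PM core, saving the mode register on the way down and restoring it (with the start bits cleared) on the way up. A minimal sketch of wiring such callbacks into a platform driver; the ex_* names and empty bodies are hypothetical.

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int ex_suspend_late(struct device *dev)
{
	/* Refuse to suspend while a channel is busy, then save state. */
	return 0;
}

static int ex_resume_early(struct device *dev)
{
	/* Restore the saved registers before normal resume runs. */
	return 0;
}

static const struct dev_pm_ops ex_pm_ops = {
	.suspend_late	= ex_suspend_late,
	.resume_early	= ex_resume_early,
};
#endif

static struct platform_driver ex_driver = {
	.driver = {
		.name	= "example",
#ifdef CONFIG_PM
		.pm	= &ex_pm_ops,
#endif
	},
};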
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index d56e83599825..239c20c84382 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -134,12 +134,36 @@ struct fsldma_device {
#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
#define FSL_DMA_CHAN_START_EXT 0x00002000
+#ifdef CONFIG_PM
+struct fsldma_chan_regs_save {
+ u32 mr;
+};
+
+enum fsldma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
+#endif
+
struct fsldma_chan {
char name[8]; /* Channel name */
struct fsldma_chan_regs __iomem *regs;
spinlock_t desc_lock; /* Descriptor operation lock */
- struct list_head ld_pending; /* Link descriptors queue */
- struct list_head ld_running; /* Link descriptors queue */
+ /*
+ * Descriptors which are queued to run, but have not yet been
+ * submitted to the hardware for execution
+ */
+ struct list_head ld_pending;
+ /*
+ * Descriptors which are currently being executed by the hardware
+ */
+ struct list_head ld_running;
+ /*
+ * Descriptors which have finished execution by the hardware. These
+ * descriptors have already had their cleanup actions run. They are
+ * waiting for the ACK bit to be set by the async_tx API.
+ */
+ struct list_head ld_completed; /* Link descriptors queue */
struct dma_chan common; /* DMA common channel */
struct dma_pool *desc_pool; /* Descriptors pool */
struct device *dev; /* Channel device */
@@ -148,6 +172,10 @@ struct fsldma_chan {
struct tasklet_struct tasklet;
u32 feature;
bool idle; /* DMA controller is idle */
+#ifdef CONFIG_PM
+ struct fsldma_chan_regs_save regs_save;
+ enum fsldma_pm_state pm_state;
+#endif
void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 286660a12cc6..9d2c9e7374dc 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -866,7 +866,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 14867e3ac8ff..f7626e37d0b8 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -271,6 +271,7 @@ struct sdma_channel {
unsigned int chn_count;
unsigned int chn_real_count;
struct tasklet_struct tasklet;
+ struct imx_dma_data data;
};
#define IMX_DMA_SG_LOOP BIT(0)
@@ -749,6 +750,11 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
per_2_per = sdma->script_addrs->per_2_per_addr;
break;
+ case IMX_DMATYPE_ASRC_SP:
+ per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ per_2_per = sdma->script_addrs->per_2_per_addr;
+ break;
case IMX_DMATYPE_MSHC:
per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
@@ -911,14 +917,13 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
int channel = sdmac->channel;
int ret = -EBUSY;
- sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
+ sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
+ GFP_KERNEL);
if (!sdmac->bd) {
ret = -ENOMEM;
goto out;
}
- memset(sdmac->bd, 0, PAGE_SIZE);
-
sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
@@ -1120,7 +1125,7 @@ err_out:
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
@@ -1414,12 +1419,14 @@ err_dma_alloc:
static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
struct imx_dma_data *data = fn_param;
if (!imx_dma_is_general_purpose(chan))
return false;
- chan->private = data;
+ sdmac->data = *data;
+ chan->private = &sdmac->data;
return true;
}
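The imx-sdma filter change above stops parking a pointer to the caller's (possibly stack-allocated) imx_dma_data in chan->private and instead copies it into per-channel storage. A minimal sketch of that pattern with hypothetical ex_* types:

#include <linux/dmaengine.h>
#include <linux/kernel.h>

struct ex_cfg {
	int dma_request;		/* hypothetical request line */
};

struct ex_chan {
	struct dma_chan	chan;
	struct ex_cfg	cfg;		/* channel-owned copy */
};

static bool ex_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct ex_chan *exc = container_of(chan, struct ex_chan, chan);
	struct ex_cfg *cfg = fn_param;

	exc->cfg = *cfg;		/* copy instead of aliasing */
	chan->private = &exc->cfg;
	return true;
}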
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 128ca143486d..bbf62927bd72 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1532,11 +1532,17 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan)
#ifdef DEBUG
if (chan->chan_id == IDMAC_IC_7) {
ic_sof = ipu_irq_map(69);
- if (ic_sof > 0)
- request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
+ if (ic_sof > 0) {
+ ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
+ if (ret)
+ dev_err(&chan->dev->device, "request irq failed for IC SOF");
+ }
ic_eof = ipu_irq_map(70);
- if (ic_eof > 0)
- request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
+ if (ic_eof > 0) {
+ ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
+ if (ret)
+ dev_err(&chan->dev->device, "request irq failed for IC EOF");
+ }
}
#endif
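The ipu_idmac hunk above now checks the return value of request_irq() instead of ignoring it. A minimal sketch of the checked pattern; the handler and name are hypothetical.

#include <linux/device.h>
#include <linux/interrupt.h>

static int ex_setup_irq(struct device *dev, unsigned int irq,
			irq_handler_t handler, void *ctx)
{
	int ret = request_irq(irq, handler, 0, "example-irq", ctx);

	if (ret)
		dev_err(dev, "request irq %u failed: %d\n", irq, ret);
	return ret;
}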
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a7b186d536b3..a1a4db5721b8 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -601,7 +601,7 @@ static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
dma_addr_t buf_addr, size_t len, size_t period_len,
enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct mmp_pdma_chan *chan;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 724f7f4c9720..6ad30e2c5038 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -389,7 +389,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
struct mmp_tdma_desc *desc;
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 2ad43738ac8b..881db2bcb48b 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -53,6 +53,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/random.h>
@@ -1036,7 +1037,15 @@ static int mpc_dma_probe(struct platform_device *op)
if (retval)
goto err_free2;
- return retval;
+ /* Register with OF helpers for DMA lookups (nonfatal) */
+ if (dev->of_node) {
+ retval = of_dma_controller_register(dev->of_node,
+ of_dma_xlate_by_chan_id, mdma);
+ if (retval)
+ dev_warn(dev, "Could not register for OF lookup\n");
+ }
+
+ return 0;
err_free2:
if (mdma->is_mpc8308)
@@ -1057,6 +1066,8 @@ static int mpc_dma_remove(struct platform_device *op)
struct device *dev = &op->dev;
struct mpc_dma *mdma = dev_get_drvdata(dev);
+ if (dev->of_node)
+ of_dma_controller_free(dev->of_node);
dma_async_device_unregister(&mdma->dma);
if (mdma->is_mpc8308) {
free_irq(mdma->irq2, mdma);
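The mpc512x change above registers the controller with the OF DMA helpers using the generic by-channel-id translation and treats failure as nonfatal; the matching of_dma_controller_free() is added to the remove path. A minimal sketch of the registration, assuming the data pointer handed to of_dma_controller_register() resolves to a struct dma_device, which is what of_dma_xlate_by_chan_id() expects:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_dma.h>

static void ex_register_of_dma(struct device *dev, struct dma_device *ddev)
{
	int ret;

	if (!dev->of_node)
		return;

	/* Registration failure only costs DT-based channel lookup,
	 * so warn and carry on instead of failing the probe. */
	ret = of_dma_controller_register(dev->of_node,
					 of_dma_xlate_by_chan_id, ddev);
	if (ret)
		dev_warn(dev, "could not register for OF lookup\n");
}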
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index ead491346da7..5ea61201dbf0 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -413,16 +413,14 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int ret;
- mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev,
- CCW_BLOCK_SIZE, &mxs_chan->ccw_phys,
- GFP_KERNEL);
+ mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev,
+ CCW_BLOCK_SIZE,
+ &mxs_chan->ccw_phys, GFP_KERNEL);
if (!mxs_chan->ccw) {
ret = -ENOMEM;
goto err_alloc;
}
- memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE);
-
if (mxs_chan->chan_irq != NO_IRQ) {
ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
0, "mxs-dma", mxs_dma);
@@ -591,7 +589,7 @@ err_out:
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
new file mode 100644
index 000000000000..5aeada56a442
--- /dev/null
+++ b/drivers/dma/nbpfaxi.c
@@ -0,0 +1,1517 @@
+/*
+ * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/dma/nbpfaxi.h>
+
+#include "dmaengine.h"
+
+#define NBPF_REG_CHAN_OFFSET 0
+#define NBPF_REG_CHAN_SIZE 0x40
+
+/* Channel Current Transaction Byte register */
+#define NBPF_CHAN_CUR_TR_BYTE 0x20
+
+/* Channel Status register */
+#define NBPF_CHAN_STAT 0x24
+#define NBPF_CHAN_STAT_EN 1
+#define NBPF_CHAN_STAT_TACT 4
+#define NBPF_CHAN_STAT_ERR 0x10
+#define NBPF_CHAN_STAT_END 0x20
+#define NBPF_CHAN_STAT_TC 0x40
+#define NBPF_CHAN_STAT_DER 0x400
+
+/* Channel Control register */
+#define NBPF_CHAN_CTRL 0x28
+#define NBPF_CHAN_CTRL_SETEN 1
+#define NBPF_CHAN_CTRL_CLREN 2
+#define NBPF_CHAN_CTRL_STG 4
+#define NBPF_CHAN_CTRL_SWRST 8
+#define NBPF_CHAN_CTRL_CLRRQ 0x10
+#define NBPF_CHAN_CTRL_CLREND 0x20
+#define NBPF_CHAN_CTRL_CLRTC 0x40
+#define NBPF_CHAN_CTRL_SETSUS 0x100
+#define NBPF_CHAN_CTRL_CLRSUS 0x200
+
+/* Channel Configuration register */
+#define NBPF_CHAN_CFG 0x2c
+#define NBPF_CHAN_CFG_SEL 7 /* terminal SELect: 0..7 */
+#define NBPF_CHAN_CFG_REQD 8 /* REQuest Direction: DMAREQ is 0: input, 1: output */
+#define NBPF_CHAN_CFG_LOEN 0x10 /* LOw ENable: low DMA request line is: 0: inactive, 1: active */
+#define NBPF_CHAN_CFG_HIEN 0x20 /* HIgh ENable: high DMA request line is: 0: inactive, 1: active */
+#define NBPF_CHAN_CFG_LVL 0x40 /* LeVeL: DMA request line is sensed as 0: edge, 1: level */
+#define NBPF_CHAN_CFG_AM 0x700 /* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */
+#define NBPF_CHAN_CFG_SDS 0xf000 /* Source Data Size: 0: 8 bits,... , 7: 1024 bits */
+#define NBPF_CHAN_CFG_DDS 0xf0000 /* Destination Data Size: as above */
+#define NBPF_CHAN_CFG_SAD 0x100000 /* Source ADdress counting: 0: increment, 1: fixed */
+#define NBPF_CHAN_CFG_DAD 0x200000 /* Destination ADdress counting: 0: increment, 1: fixed */
+#define NBPF_CHAN_CFG_TM 0x400000 /* Transfer Mode: 0: single, 1: block TM */
+#define NBPF_CHAN_CFG_DEM 0x1000000 /* DMAEND interrupt Mask */
+#define NBPF_CHAN_CFG_TCM 0x2000000 /* DMATCO interrupt Mask */
+#define NBPF_CHAN_CFG_SBE 0x8000000 /* Sweep Buffer Enable */
+#define NBPF_CHAN_CFG_RSEL 0x10000000 /* RM: Register Set sELect */
+#define NBPF_CHAN_CFG_RSW 0x20000000 /* RM: Register Select sWitch */
+#define NBPF_CHAN_CFG_REN 0x40000000 /* RM: Register Set Enable */
+#define NBPF_CHAN_CFG_DMS 0x80000000 /* 0: register mode (RM), 1: link mode (LM) */
+
+#define NBPF_CHAN_NXLA 0x38
+#define NBPF_CHAN_CRLA 0x3c
+
+/* Link Header field */
+#define NBPF_HEADER_LV 1
+#define NBPF_HEADER_LE 2
+#define NBPF_HEADER_WBD 4
+#define NBPF_HEADER_DIM 8
+
+#define NBPF_CTRL 0x300
+#define NBPF_CTRL_PR 1 /* 0: fixed priority, 1: round robin */
+#define NBPF_CTRL_LVINT 2 /* DMAEND and DMAERR signalling: 0: pulse, 1: level */
+
+#define NBPF_DSTAT_ER 0x314
+#define NBPF_DSTAT_END 0x318
+
+#define NBPF_DMA_BUSWIDTHS \
+ (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+struct nbpf_config {
+ int num_channels;
+ int buffer_size;
+};
+
+/*
+ * We've got 3 types of objects, used to describe DMA transfers:
+ * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object
+ * in it, used to communicate with the user
+ * 2. hardware DMA link descriptors, that we pass to DMAC for DMA transfer
+ * queuing, these must be DMAable, using either the streaming DMA API or
+ * allocated from coherent memory - one per SG segment
+ * 3. one per SG segment descriptors, used to manage HW link descriptors from
+ * (2). They do not have to be DMAable. They can either be (a) allocated
+ * together with link descriptors as mixed (DMA / CPU) objects, or (b)
+ * separately. Even if allocated separately it would be best to link them
+ * to link descriptors once during channel resource allocation and always
+ * use them as a single object.
+ * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be
+ * treated as a single SG segment descriptor.
+ */
+
+struct nbpf_link_reg {
+ u32 header;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 transaction_size;
+ u32 config;
+ u32 interval;
+ u32 extension;
+ u32 next;
+} __packed;
+
+struct nbpf_device;
+struct nbpf_channel;
+struct nbpf_desc;
+
+struct nbpf_link_desc {
+ struct nbpf_link_reg *hwdesc;
+ dma_addr_t hwdesc_dma_addr;
+ struct nbpf_desc *desc;
+ struct list_head node;
+};
+
+/**
+ * struct nbpf_desc - DMA transfer descriptor
+ * @async_tx: dmaengine object
+ * @user_wait: waiting for a user ack
+ * @length: total transfer length
+ * @sg: list of hardware descriptors, represented by struct nbpf_link_desc
+ * @node: member in channel descriptor lists
+ */
+struct nbpf_desc {
+ struct dma_async_tx_descriptor async_tx;
+ bool user_wait;
+ size_t length;
+ struct nbpf_channel *chan;
+ struct list_head sg;
+ struct list_head node;
+};
+
+/* Take a wild guess: allocate 4 segments per descriptor */
+#define NBPF_SEGMENTS_PER_DESC 4
+#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) / \
+ (sizeof(struct nbpf_desc) + \
+ NBPF_SEGMENTS_PER_DESC * \
+ (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg))))
+#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE)
+
+struct nbpf_desc_page {
+ struct list_head node;
+ struct nbpf_desc desc[NBPF_DESCS_PER_PAGE];
+ struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE];
+ struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE];
+};
+
+/**
+ * struct nbpf_channel - one DMAC channel
+ * @dma_chan: standard dmaengine channel object
+ * @base: register address base
+ * @nbpf: DMAC
+ * @name: IRQ name
+ * @irq: IRQ number
+ * @slave_addr: address for slave DMA
+ * @slave_width:slave data size in bytes
+ * @slave_burst:maximum slave burst size in bytes
+ * @terminal: DMA terminal, assigned to this channel
+ * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG
+ * @flags: configuration flags from DT
+ * @lock: protect descriptor lists
+ * @free_links: list of free link descriptors
+ * @free: list of free descriptors
+ * @queued: list of queued descriptors
+ * @active: list of descriptors, scheduled for processing
+ * @done: list of completed descriptors, waiting post-processing
+ * @desc_page: list of additionally allocated descriptor pages - if any
+ */
+struct nbpf_channel {
+ struct dma_chan dma_chan;
+ struct tasklet_struct tasklet;
+ void __iomem *base;
+ struct nbpf_device *nbpf;
+ char name[16];
+ int irq;
+ dma_addr_t slave_src_addr;
+ size_t slave_src_width;
+ size_t slave_src_burst;
+ dma_addr_t slave_dst_addr;
+ size_t slave_dst_width;
+ size_t slave_dst_burst;
+ unsigned int terminal;
+ u32 dmarq_cfg;
+ unsigned long flags;
+ spinlock_t lock;
+ struct list_head free_links;
+ struct list_head free;
+ struct list_head queued;
+ struct list_head active;
+ struct list_head done;
+ struct list_head desc_page;
+ struct nbpf_desc *running;
+ bool paused;
+};
+
+struct nbpf_device {
+ struct dma_device dma_dev;
+ void __iomem *base;
+ struct clk *clk;
+ const struct nbpf_config *config;
+ struct nbpf_channel chan[];
+};
+
+enum nbpf_model {
+ NBPF1B4,
+ NBPF1B8,
+ NBPF1B16,
+ NBPF4B4,
+ NBPF4B8,
+ NBPF4B16,
+ NBPF8B4,
+ NBPF8B8,
+ NBPF8B16,
+};
+
+static struct nbpf_config nbpf_cfg[] = {
+ [NBPF1B4] = {
+ .num_channels = 1,
+ .buffer_size = 4,
+ },
+ [NBPF1B8] = {
+ .num_channels = 1,
+ .buffer_size = 8,
+ },
+ [NBPF1B16] = {
+ .num_channels = 1,
+ .buffer_size = 16,
+ },
+ [NBPF4B4] = {
+ .num_channels = 4,
+ .buffer_size = 4,
+ },
+ [NBPF4B8] = {
+ .num_channels = 4,
+ .buffer_size = 8,
+ },
+ [NBPF4B16] = {
+ .num_channels = 4,
+ .buffer_size = 16,
+ },
+ [NBPF8B4] = {
+ .num_channels = 8,
+ .buffer_size = 4,
+ },
+ [NBPF8B8] = {
+ .num_channels = 8,
+ .buffer_size = 8,
+ },
+ [NBPF8B16] = {
+ .num_channels = 8,
+ .buffer_size = 16,
+ },
+};
+
+#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan)
+
+/*
+ * dmaengine drivers seem to have a lot in common and instead of sharing more
+ * code, they reimplement those common algorithms independently. In this driver
+ * we try to separate the hardware-specific part from the (largely) generic
+ * part. This improves code readability and makes it possible in the future to
+ * reuse the generic code in form of a helper library. That generic code should
+ * be suitable for various DMA controllers, using transfer descriptors in RAM
+ * and pushing one SG list at a time to the DMA controller.
+ */
+
+/* Hardware-specific part */
+
+static inline u32 nbpf_chan_read(struct nbpf_channel *chan,
+ unsigned int offset)
+{
+ u32 data = ioread32(chan->base + offset);
+ dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
+ __func__, chan->base, offset, data);
+ return data;
+}
+
+static inline void nbpf_chan_write(struct nbpf_channel *chan,
+ unsigned int offset, u32 data)
+{
+ iowrite32(data, chan->base + offset);
+ dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n",
+ __func__, chan->base, offset, data);
+}
+
+static inline u32 nbpf_read(struct nbpf_device *nbpf,
+ unsigned int offset)
+{
+ u32 data = ioread32(nbpf->base + offset);
+ dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
+ __func__, nbpf->base, offset, data);
+ return data;
+}
+
+static inline void nbpf_write(struct nbpf_device *nbpf,
+ unsigned int offset, u32 data)
+{
+ iowrite32(data, nbpf->base + offset);
+ dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n",
+ __func__, nbpf->base, offset, data);
+}
+
+static void nbpf_chan_halt(struct nbpf_channel *chan)
+{
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
+}
+
+static bool nbpf_status_get(struct nbpf_channel *chan)
+{
+ u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);
+
+ return status & BIT(chan - chan->nbpf->chan);
+}
+
+static void nbpf_status_ack(struct nbpf_channel *chan)
+{
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
+}
+
+static u32 nbpf_error_get(struct nbpf_device *nbpf)
+{
+ return nbpf_read(nbpf, NBPF_DSTAT_ER);
+}
+
+static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
+{
+ return nbpf->chan + __ffs(error);
+}
+
+static void nbpf_error_clear(struct nbpf_channel *chan)
+{
+ u32 status;
+ int i;
+
+ /* Stop the channel, make sure DMA has been aborted */
+ nbpf_chan_halt(chan);
+
+ for (i = 1000; i; i--) {
+ status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
+ if (!(status & NBPF_CHAN_STAT_TACT))
+ break;
+ cpu_relax();
+ }
+
+ if (!i)
+ dev_err(chan->dma_chan.device->dev,
+ "%s(): abort timeout, channel status 0x%x\n", __func__, status);
+
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
+}
+
+static int nbpf_start(struct nbpf_desc *desc)
+{
+ struct nbpf_channel *chan = desc->chan;
+ struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node);
+
+ nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
+ chan->paused = false;
+
+ /* Software trigger MEMCPY - only MEMCPY uses the block mode */
+ if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM)
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);
+
+ dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__,
+ nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA));
+
+ return 0;
+}
+
+static void nbpf_chan_prepare(struct nbpf_channel *chan)
+{
+ chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
+ (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
+ (chan->flags & NBPF_SLAVE_RQ_LEVEL ?
+ NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
+ chan->terminal;
+}
+
+static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
+{
+ /* Don't output DMAACK */
+ chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
+ chan->terminal = 0;
+ chan->flags = 0;
+}
+
+static void nbpf_chan_configure(struct nbpf_channel *chan)
+{
+ /*
+ * We assume, that only the link mode and DMA request line configuration
+ * have to be set in the configuration register manually. Dynamic
+ * per-transfer configuration will be loaded from transfer descriptors.
+ */
+ nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
+}
+
+static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size)
+{
+ /* Maximum supported bursts depend on the buffer size */
+ return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8));
+}
+
+static size_t nbpf_xfer_size(struct nbpf_device *nbpf,
+ enum dma_slave_buswidth width, u32 burst)
+{
+ size_t size;
+
+ if (!burst)
+ burst = 1;
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ size = 8 * burst;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ size = 4 * burst;
+ break;
+
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ size = 2 * burst;
+ break;
+
+ default:
+ pr_warn("%s(): invalid bus width %u\n", __func__, width);
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ size = burst;
+ }
+
+ return nbpf_xfer_ds(nbpf, size);
+}
+
+/*
+ * We need a way to recognise slaves, whose data is sent "raw" over the bus,
+ * i.e. it isn't known in advance how many bytes will be received. Therefore
+ * the slave driver has to provide a "large enough" buffer and either read the
+ * buffer, when it is full, or detect, that some data has arrived, then wait for
+ * a timeout, if no more data arrives - receive what's already there. We want to
+ * handle such slaves in a special way to allow an optimised mode for other
+ * users, for whom the amount of data is known in advance. So far there's no way
+ * to recognise such slaves. We use a data-width check to distinguish between
+ * the SD host and the PL011 UART.
+ */
+
+static int nbpf_prep_one(struct nbpf_link_desc *ldesc,
+ enum dma_transfer_direction direction,
+ dma_addr_t src, dma_addr_t dst, size_t size, bool last)
+{
+ struct nbpf_link_reg *hwdesc = ldesc->hwdesc;
+ struct nbpf_desc *desc = ldesc->desc;
+ struct nbpf_channel *chan = desc->chan;
+ struct device *dev = chan->dma_chan.device->dev;
+ size_t mem_xfer, slave_xfer;
+ bool can_burst;
+
+ hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV |
+ (last ? NBPF_HEADER_LE : 0);
+
+ hwdesc->src_addr = src;
+ hwdesc->dst_addr = dst;
+ hwdesc->transaction_size = size;
+
+ /*
+ * set config: SAD, DAD, DDS, SDS, etc.
+ * Note on transfer sizes: the DMAC can perform unaligned DMA transfers,
+ * but it is important to have transaction size a multiple of both
+ * receiver and transmitter transfer sizes. It is also possible to use
+ * different RAM and device transfer sizes, and it does work well with
+ * some devices, e.g. with V08R07S01E SD host controllers, which can use
+ * 128 byte transfers. But this doesn't work with other devices,
+ * especially when the transaction size is unknown. This is the case,
+ * e.g. with serial drivers like amba-pl011.c. For reception it sets up
+ * the transaction size of 4K and if fewer bytes are received, it
+ * pauses DMA and reads out data received via DMA as well as those left
+ * in the Rx FIFO. For this to work with the RAM side using burst
+ * transfers we enable the SBE bit and terminate the transfer in our
+ * DMA_PAUSE handler.
+ */
+ mem_xfer = nbpf_xfer_ds(chan->nbpf, size);
+
+ switch (direction) {
+ case DMA_DEV_TO_MEM:
+ can_burst = chan->slave_src_width >= 3;
+ slave_xfer = min(mem_xfer, can_burst ?
+ chan->slave_src_burst : chan->slave_src_width);
+ /*
+ * Is the slave narrower than 64 bits, i.e. isn't using the full
+ * bus width and cannot use bursts?
+ */
+ if (mem_xfer > chan->slave_src_burst && !can_burst)
+ mem_xfer = chan->slave_src_burst;
+ /* Device-to-RAM DMA is unreliable without REQD set */
+ hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) |
+ (NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD |
+ NBPF_CHAN_CFG_SBE;
+ break;
+
+ case DMA_MEM_TO_DEV:
+ slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ?
+ chan->slave_dst_burst : chan->slave_dst_width);
+ hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
+ (NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD;
+ break;
+
+ case DMA_MEM_TO_MEM:
+ hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM |
+ (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) |
+ (NBPF_CHAN_CFG_DDS & (mem_xfer << 16));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) |
+ NBPF_CHAN_CFG_DMS;
+
+ dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n",
+ __func__, &ldesc->hwdesc_dma_addr, hwdesc->header,
+ hwdesc->config, size, &src, &dst);
+
+ dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc),
+ DMA_TO_DEVICE);
+
+ return 0;
+}
+
+static size_t nbpf_bytes_left(struct nbpf_channel *chan)
+{
+ return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE);
+}
+
+static void nbpf_configure(struct nbpf_device *nbpf)
+{
+ nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT);
+}
+
+static void nbpf_pause(struct nbpf_channel *chan)
+{
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
+ /* See comment in nbpf_prep_one() */
+ nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);
+}
+
+/* Generic part */
+
+/* DMA ENGINE functions */
+static void nbpf_issue_pending(struct dma_chan *dchan)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ unsigned long flags;
+
+ dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (list_empty(&chan->queued))
+ goto unlock;
+
+ list_splice_tail_init(&chan->queued, &chan->active);
+
+ if (!chan->running) {
+ struct nbpf_desc *desc = list_first_entry(&chan->active,
+ struct nbpf_desc, node);
+ if (!nbpf_start(desc))
+ chan->running = desc;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static enum dma_status nbpf_tx_status(struct dma_chan *dchan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ enum dma_status status = dma_cookie_status(dchan, cookie, state);
+
+ if (state) {
+ dma_cookie_t running;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ running = chan->running ? chan->running->async_tx.cookie : -EINVAL;
+
+ if (cookie == running) {
+ state->residue = nbpf_bytes_left(chan);
+ dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__,
+ state->residue);
+ } else if (status == DMA_IN_PROGRESS) {
+ struct nbpf_desc *desc;
+ bool found = false;
+
+ list_for_each_entry(desc, &chan->active, node)
+ if (desc->async_tx.cookie == cookie) {
+ found = true;
+ break;
+ }
+
+ if (!found)
+ list_for_each_entry(desc, &chan->queued, node)
+ if (desc->async_tx.cookie == cookie) {
+ found = true;
+ break;
+
+ }
+
+ state->residue = found ? desc->length : 0;
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ }
+
+ if (chan->paused)
+ status = DMA_PAUSED;
+
+ return status;
+}
+
+static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx);
+ struct nbpf_channel *chan = desc->chan;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->node, &chan->queued);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie);
+
+ return cookie;
+}
+
+static int nbpf_desc_page_alloc(struct nbpf_channel *chan)
+{
+ struct dma_chan *dchan = &chan->dma_chan;
+ struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ struct nbpf_link_desc *ldesc;
+ struct nbpf_link_reg *hwdesc;
+ struct nbpf_desc *desc;
+ LIST_HEAD(head);
+ LIST_HEAD(lhead);
+ int i;
+ struct device *dev = dchan->device->dev;
+
+ if (!dpage)
+ return -ENOMEM;
+
+ dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n",
+ __func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage));
+
+ for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc;
+ i < ARRAY_SIZE(dpage->ldesc);
+ i++, ldesc++, hwdesc++) {
+ ldesc->hwdesc = hwdesc;
+ list_add_tail(&ldesc->node, &lhead);
+ ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev,
+ hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE);
+
+ dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__,
+ hwdesc, &ldesc->hwdesc_dma_addr);
+ }
+
+ for (i = 0, desc = dpage->desc;
+ i < ARRAY_SIZE(dpage->desc);
+ i++, desc++) {
+ dma_async_tx_descriptor_init(&desc->async_tx, dchan);
+ desc->async_tx.tx_submit = nbpf_tx_submit;
+ desc->chan = chan;
+ INIT_LIST_HEAD(&desc->sg);
+ list_add_tail(&desc->node, &head);
+ }
+
+ /*
+ * This function cannot be called from interrupt context, so, no need to
+ * save flags
+ */
+ spin_lock_irq(&chan->lock);
+ list_splice_tail(&lhead, &chan->free_links);
+ list_splice_tail(&head, &chan->free);
+ list_add(&dpage->node, &chan->desc_page);
+ spin_unlock_irq(&chan->lock);
+
+ return ARRAY_SIZE(dpage->desc);
+}
+
+static void nbpf_desc_put(struct nbpf_desc *desc)
+{
+ struct nbpf_channel *chan = desc->chan;
+ struct nbpf_link_desc *ldesc, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ list_for_each_entry_safe(ldesc, tmp, &desc->sg, node)
+ list_move(&ldesc->node, &chan->free_links);
+
+ list_add(&desc->node, &chan->free);
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+static void nbpf_scan_acked(struct nbpf_channel *chan)
+{
+ struct nbpf_desc *desc, *tmp;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->lock, flags);
+ list_for_each_entry_safe(desc, tmp, &chan->done, node)
+ if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) {
+ list_move(&desc->node, &head);
+ desc->user_wait = false;
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, tmp, &head, node) {
+ list_del(&desc->node);
+ nbpf_desc_put(desc);
+ }
+}
+
+/*
+ * We have to allocate descriptors with the channel lock dropped. This means,
+ * before we re-acquire the lock buffers can be taken already, so we have to
+ * re-check after re-acquiring the lock and possibly retry, if buffers are gone
+ * again.
+ */
+static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
+{
+ struct nbpf_desc *desc = NULL;
+ struct nbpf_link_desc *ldesc, *prev = NULL;
+
+ nbpf_scan_acked(chan);
+
+ spin_lock_irq(&chan->lock);
+
+ do {
+ int i = 0, ret;
+
+ if (list_empty(&chan->free)) {
+ /* No more free descriptors */
+ spin_unlock_irq(&chan->lock);
+ ret = nbpf_desc_page_alloc(chan);
+ if (ret < 0)
+ return NULL;
+ spin_lock_irq(&chan->lock);
+ continue;
+ }
+ desc = list_first_entry(&chan->free, struct nbpf_desc, node);
+ list_del(&desc->node);
+
+ do {
+ if (list_empty(&chan->free_links)) {
+ /* No more free link descriptors */
+ spin_unlock_irq(&chan->lock);
+ ret = nbpf_desc_page_alloc(chan);
+ if (ret < 0) {
+ nbpf_desc_put(desc);
+ return NULL;
+ }
+ spin_lock_irq(&chan->lock);
+ continue;
+ }
+
+ ldesc = list_first_entry(&chan->free_links,
+ struct nbpf_link_desc, node);
+ ldesc->desc = desc;
+ if (prev)
+ prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr;
+
+ prev = ldesc;
+ list_move_tail(&ldesc->node, &desc->sg);
+
+ i++;
+ } while (i < len);
+ } while (!desc);
+
+ prev->hwdesc->next = 0;
+
+ spin_unlock_irq(&chan->lock);
+
+ return desc;
+}
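
The comment above nbpf_desc_get() describes a drop-the-lock-to-allocate-then-re-check pattern. For context only (not part of this patch), the following stand-alone sketch distills that pattern; the pool, pool_item and grow() names are placeholders.

#include <linux/list.h>
#include <linux/spinlock.h>

struct pool_item { struct list_head node; };
struct pool { spinlock_t lock; struct list_head free; };

/* grow() may sleep or allocate, so it must run with the lock dropped */
static struct pool_item *pool_get(struct pool *pool, int (*grow)(struct pool *))
{
	struct pool_item *item;

	spin_lock_irq(&pool->lock);
	while (list_empty(&pool->free)) {
		/* Cannot allocate under the spinlock: drop it first */
		spin_unlock_irq(&pool->lock);
		if (grow(pool) < 0)
			return NULL;
		/* Re-acquire and re-check: another user may have raced us */
		spin_lock_irq(&pool->lock);
	}
	item = list_first_entry(&pool->free, struct pool_item, node);
	list_del(&item->node);
	spin_unlock_irq(&pool->lock);

	return item;
}
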
+
+static void nbpf_chan_idle(struct nbpf_channel *chan)
+{
+ struct nbpf_desc *desc, *tmp;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ list_splice_init(&chan->done, &head);
+ list_splice_init(&chan->active, &head);
+ list_splice_init(&chan->queued, &head);
+
+ chan->running = NULL;
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ list_for_each_entry_safe(desc, tmp, &head, node) {
+ dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n",
+ __func__, desc, desc->async_tx.cookie);
+ list_del(&desc->node);
+ nbpf_desc_put(desc);
+ }
+}
+
+static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ struct dma_slave_config *config;
+
+ dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ dev_dbg(dchan->device->dev, "Terminating\n");
+ nbpf_chan_halt(chan);
+ nbpf_chan_idle(chan);
+ break;
+
+ case DMA_SLAVE_CONFIG:
+ if (!arg)
+ return -EINVAL;
+ config = (struct dma_slave_config *)arg;
+
+ /*
+ * We could check config->slave_id to match chan->terminal here,
+ * but with DT they would be coming from the same source, so
+ * such a check would be superfluous.
+ */
+
+ chan->slave_dst_addr = config->dst_addr;
+ chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
+ config->dst_addr_width, 1);
+ chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
+ config->dst_addr_width,
+ config->dst_maxburst);
+ chan->slave_src_addr = config->src_addr;
+ chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
+ config->src_addr_width, 1);
+ chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
+ config->src_addr_width,
+ config->src_maxburst);
+ break;
+
+ case DMA_PAUSE:
+ chan->paused = true;
+ nbpf_pause(chan);
+ break;
+
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan,
+ struct scatterlist *src_sg, struct scatterlist *dst_sg,
+ size_t len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct nbpf_link_desc *ldesc;
+ struct scatterlist *mem_sg;
+ struct nbpf_desc *desc;
+ bool inc_src, inc_dst;
+ size_t data_len = 0;
+ int i = 0;
+
+ switch (direction) {
+ case DMA_DEV_TO_MEM:
+ mem_sg = dst_sg;
+ inc_src = false;
+ inc_dst = true;
+ break;
+
+ case DMA_MEM_TO_DEV:
+ mem_sg = src_sg;
+ inc_src = true;
+ inc_dst = false;
+ break;
+
+ default:
+ case DMA_MEM_TO_MEM:
+ mem_sg = src_sg;
+ inc_src = true;
+ inc_dst = true;
+ }
+
+ desc = nbpf_desc_get(chan, len);
+ if (!desc)
+ return NULL;
+
+ desc->async_tx.flags = flags;
+ desc->async_tx.cookie = -EBUSY;
+ desc->user_wait = false;
+
+ /*
+ * This is a private descriptor list, and we own the descriptor. No need
+ * to lock.
+ */
+ list_for_each_entry(ldesc, &desc->sg, node) {
+ int ret = nbpf_prep_one(ldesc, direction,
+ sg_dma_address(src_sg),
+ sg_dma_address(dst_sg),
+ sg_dma_len(mem_sg),
+ i == len - 1);
+ if (ret < 0) {
+ nbpf_desc_put(desc);
+ return NULL;
+ }
+ data_len += sg_dma_len(mem_sg);
+ if (inc_src)
+ src_sg = sg_next(src_sg);
+ if (inc_dst)
+ dst_sg = sg_next(dst_sg);
+ mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg;
+ i++;
+ }
+
+ desc->length = data_len;
+
+ /* The user has to return the descriptor to us ASAP via .tx_submit() */
+ return &desc->async_tx;
+}
+
+static struct dma_async_tx_descriptor *nbpf_prep_memcpy(
+ struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ struct scatterlist dst_sg;
+ struct scatterlist src_sg;
+
+ sg_init_table(&dst_sg, 1);
+ sg_init_table(&src_sg, 1);
+
+ sg_dma_address(&dst_sg) = dst;
+ sg_dma_address(&src_sg) = src;
+
+ sg_dma_len(&dst_sg) = len;
+ sg_dma_len(&src_sg) = len;
+
+ dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n",
+ __func__, len, &src, &dst);
+
+ return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1,
+ DMA_MEM_TO_MEM, flags);
+}
+
+static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg(
+ struct dma_chan *dchan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+
+ if (dst_nents != src_nents)
+ return NULL;
+
+ return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents,
+ DMA_MEM_TO_MEM, flags);
+}
+
+static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags, void *context)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ struct scatterlist slave_sg;
+
+ dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
+
+ sg_init_table(&slave_sg, 1);
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ sg_dma_address(&slave_sg) = chan->slave_dst_addr;
+ return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
+ direction, flags);
+
+ case DMA_DEV_TO_MEM:
+ sg_dma_address(&slave_sg) = chan->slave_src_addr;
+ return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
+ direction, flags);
+
+ default:
+ return NULL;
+ }
+}
+
+static int nbpf_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ int ret;
+
+ INIT_LIST_HEAD(&chan->free);
+ INIT_LIST_HEAD(&chan->free_links);
+ INIT_LIST_HEAD(&chan->queued);
+ INIT_LIST_HEAD(&chan->active);
+ INIT_LIST_HEAD(&chan->done);
+
+ ret = nbpf_desc_page_alloc(chan);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__,
+ chan->terminal);
+
+ nbpf_chan_configure(chan);
+
+ return ret;
+}
+
+static void nbpf_free_chan_resources(struct dma_chan *dchan)
+{
+ struct nbpf_channel *chan = nbpf_to_chan(dchan);
+ struct nbpf_desc_page *dpage, *tmp;
+
+ dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
+
+ nbpf_chan_halt(chan);
+ nbpf_chan_idle(chan);
+ /* Clean up in case the channel is re-used for MEMCPY after slave DMA */
+ nbpf_chan_prepare_default(chan);
+
+ list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) {
+ struct nbpf_link_desc *ldesc;
+ int i;
+ list_del(&dpage->node);
+ for (i = 0, ldesc = dpage->ldesc;
+ i < ARRAY_SIZE(dpage->ldesc);
+ i++, ldesc++)
+ dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr,
+ sizeof(*ldesc->hwdesc), DMA_TO_DEVICE);
+ free_page((unsigned long)dpage);
+ }
+}
+
+static int nbpf_slave_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ caps->src_addr_widths = NBPF_DMA_BUSWIDTHS;
+ caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS;
+ caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ caps->cmd_pause = false;
+ caps->cmd_terminate = true;
+
+ return 0;
+}
+
+static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct nbpf_device *nbpf = ofdma->of_dma_data;
+ struct dma_chan *dchan;
+ struct nbpf_channel *chan;
+
+ if (dma_spec->args_count != 2)
+ return NULL;
+
+ dchan = dma_get_any_slave_channel(&nbpf->dma_dev);
+ if (!dchan)
+ return NULL;
+
+ dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__,
+ dma_spec->np->name);
+
+ chan = nbpf_to_chan(dchan);
+
+ chan->terminal = dma_spec->args[0];
+ chan->flags = dma_spec->args[1];
+
+ nbpf_chan_prepare(chan);
+ nbpf_chan_configure(chan);
+
+ return dchan;
+}
+
+static void nbpf_chan_tasklet(unsigned long data)
+{
+ struct nbpf_channel *chan = (struct nbpf_channel *)data;
+ struct nbpf_desc *desc, *tmp;
+ dma_async_tx_callback callback;
+ void *param;
+
+ while (!list_empty(&chan->done)) {
+ bool found = false, must_put, recycling = false;
+
+ spin_lock_irq(&chan->lock);
+
+ list_for_each_entry_safe(desc, tmp, &chan->done, node) {
+ if (!desc->user_wait) {
+ /* Newly completed descriptor, have to process */
+ found = true;
+ break;
+ } else if (async_tx_test_ack(&desc->async_tx)) {
+ /*
+ * This descriptor was waiting for a user ACK,
+ * it can be recycled now.
+ */
+ list_del(&desc->node);
+ spin_unlock_irq(&chan->lock);
+ nbpf_desc_put(desc);
+ recycling = true;
+ break;
+ }
+ }
+
+ if (recycling)
+ continue;
+
+ if (!found) {
+ /* This can happen if TERMINATE_ALL has been called */
+ spin_unlock_irq(&chan->lock);
+ break;
+ }
+
+ dma_cookie_complete(&desc->async_tx);
+
+ /*
+ * Once the lock is released we cannot dereference desc, as it may
+ * still be on the "done" list
+ */
+ if (async_tx_test_ack(&desc->async_tx)) {
+ list_del(&desc->node);
+ must_put = true;
+ } else {
+ desc->user_wait = true;
+ must_put = false;
+ }
+
+ callback = desc->async_tx.callback;
+ param = desc->async_tx.callback_param;
+
+ /* ack and callback completed descriptor */
+ spin_unlock_irq(&chan->lock);
+
+ if (callback)
+ callback(param);
+
+ if (must_put)
+ nbpf_desc_put(desc);
+ }
+}
+
+static irqreturn_t nbpf_chan_irq(int irq, void *dev)
+{
+ struct nbpf_channel *chan = dev;
+ bool done = nbpf_status_get(chan);
+ struct nbpf_desc *desc;
+ irqreturn_t ret;
+ bool bh = false;
+
+ if (!done)
+ return IRQ_NONE;
+
+ nbpf_status_ack(chan);
+
+ dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);
+
+ spin_lock(&chan->lock);
+ desc = chan->running;
+ if (WARN_ON(!desc)) {
+ ret = IRQ_NONE;
+ goto unlock;
+ } else {
+ ret = IRQ_HANDLED;
+ bh = true;
+ }
+
+ list_move_tail(&desc->node, &chan->done);
+ chan->running = NULL;
+
+ if (!list_empty(&chan->active)) {
+ desc = list_first_entry(&chan->active,
+ struct nbpf_desc, node);
+ if (!nbpf_start(desc))
+ chan->running = desc;
+ }
+
+unlock:
+ spin_unlock(&chan->lock);
+
+ if (bh)
+ tasklet_schedule(&chan->tasklet);
+
+ return ret;
+}
+
+static irqreturn_t nbpf_err_irq(int irq, void *dev)
+{
+ struct nbpf_device *nbpf = dev;
+ u32 error = nbpf_error_get(nbpf);
+
+ dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);
+
+ if (!error)
+ return IRQ_NONE;
+
+ do {
+ struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
+ /* On error: abort all queued transfers, no callback */
+ nbpf_error_clear(chan);
+ nbpf_chan_idle(chan);
+ error = nbpf_error_get(nbpf);
+ } while (error);
+
+ return IRQ_HANDLED;
+}
+
+static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
+{
+ struct dma_device *dma_dev = &nbpf->dma_dev;
+ struct nbpf_channel *chan = nbpf->chan + n;
+ int ret;
+
+ chan->nbpf = nbpf;
+ chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
+ INIT_LIST_HEAD(&chan->desc_page);
+ spin_lock_init(&chan->lock);
+ chan->dma_chan.device = dma_dev;
+ dma_cookie_init(&chan->dma_chan);
+ nbpf_chan_prepare_default(chan);
+
+ dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);
+
+ snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
+
+ tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan);
+ ret = devm_request_irq(dma_dev->dev, chan->irq,
+ nbpf_chan_irq, IRQF_SHARED,
+ chan->name, chan);
+ if (ret < 0)
+ return ret;
+
+ /* Add the channel to DMA device channel list */
+ list_add_tail(&chan->dma_chan.device_node,
+ &dma_dev->channels);
+
+ return 0;
+}
+
+static const struct of_device_id nbpf_match[] = {
+ {.compatible = "renesas,nbpfaxi64dmac1b4", .data = &nbpf_cfg[NBPF1B4]},
+ {.compatible = "renesas,nbpfaxi64dmac1b8", .data = &nbpf_cfg[NBPF1B8]},
+ {.compatible = "renesas,nbpfaxi64dmac1b16", .data = &nbpf_cfg[NBPF1B16]},
+ {.compatible = "renesas,nbpfaxi64dmac4b4", .data = &nbpf_cfg[NBPF4B4]},
+ {.compatible = "renesas,nbpfaxi64dmac4b8", .data = &nbpf_cfg[NBPF4B8]},
+ {.compatible = "renesas,nbpfaxi64dmac4b16", .data = &nbpf_cfg[NBPF4B16]},
+ {.compatible = "renesas,nbpfaxi64dmac8b4", .data = &nbpf_cfg[NBPF8B4]},
+ {.compatible = "renesas,nbpfaxi64dmac8b8", .data = &nbpf_cfg[NBPF8B8]},
+ {.compatible = "renesas,nbpfaxi64dmac8b16", .data = &nbpf_cfg[NBPF8B16]},
+ {}
+};
+MODULE_DEVICE_TABLE(of, nbpf_match);
+
+static int nbpf_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id = of_match_device(nbpf_match, dev);
+ struct device_node *np = dev->of_node;
+ struct nbpf_device *nbpf;
+ struct dma_device *dma_dev;
+ struct resource *iomem, *irq_res;
+ const struct nbpf_config *cfg;
+ int num_channels;
+ int ret, irq, eirq, i;
+ int irqbuf[9] /* maximum 8 channels + error IRQ */;
+ unsigned int irqs = 0;
+
+ BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
+
+ /* DT only */
+ if (!np || !of_id || !of_id->data)
+ return -ENODEV;
+
+ cfg = of_id->data;
+ num_channels = cfg->num_channels;
+
+ nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
+ sizeof(nbpf->chan[0]), GFP_KERNEL);
+ if (!nbpf) {
+ dev_err(dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ dma_dev = &nbpf->dma_dev;
+ dma_dev->dev = dev;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ nbpf->base = devm_ioremap_resource(dev, iomem);
+ if (IS_ERR(nbpf->base))
+ return PTR_ERR(nbpf->base);
+
+ nbpf->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(nbpf->clk))
+ return PTR_ERR(nbpf->clk);
+
+ nbpf->config = cfg;
+
+ for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!irq_res)
+ break;
+
+ for (irq = irq_res->start; irq <= irq_res->end;
+ irq++, irqs++)
+ irqbuf[irqs] = irq;
+ }
+
+ /*
+ * 3 IRQ resource schemes are supported:
+ * 1. 1 shared IRQ for error and all channels
+ * 2. 2 IRQs: one for error and one shared for all channels
+ * 3. 1 IRQ for error and a separate IRQ for each channel
+ */
+ if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
+ return -ENXIO;
+
+ if (irqs == 1) {
+ eirq = irqbuf[0];
+
+ for (i = 0; i <= num_channels; i++)
+ nbpf->chan[i].irq = irqbuf[0];
+ } else {
+ eirq = platform_get_irq_byname(pdev, "error");
+ if (eirq < 0)
+ return eirq;
+
+ if (irqs == num_channels + 1) {
+ struct nbpf_channel *chan;
+
+ for (i = 0, chan = nbpf->chan; i <= num_channels;
+ i++, chan++) {
+ /* Skip the error IRQ */
+ if (irqbuf[i] == eirq)
+ i++;
+ chan->irq = irqbuf[i];
+ }
+
+ if (chan != nbpf->chan + num_channels)
+ return -EINVAL;
+ } else {
+ /* 2 IRQs and more than one channel */
+ if (irqbuf[0] == eirq)
+ irq = irqbuf[1];
+ else
+ irq = irqbuf[0];
+
+ for (i = 0; i <= num_channels; i++)
+ nbpf->chan[i].irq = irq;
+ }
+ }
+
+ ret = devm_request_irq(dev, eirq, nbpf_err_irq,
+ IRQF_SHARED, "dma error", nbpf);
+ if (ret < 0)
+ return ret;
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Create DMA channels */
+ for (i = 0; i < num_channels; i++) {
+ ret = nbpf_chan_probe(nbpf, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+ dma_cap_set(DMA_SG, dma_dev->cap_mask);
+
+ /* Common and MEMCPY operations */
+ dma_dev->device_alloc_chan_resources
+ = nbpf_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
+ dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
+ dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
+ dma_dev->device_tx_status = nbpf_tx_status;
+ dma_dev->device_issue_pending = nbpf_issue_pending;
+ dma_dev->device_slave_caps = nbpf_slave_caps;
+
+ /*
+ * If we drop support for unaligned MEMCPY buffer addresses and/or
+ * lengths by setting
+ * dma_dev->copy_align = 4;
+ * then we can set the transfer length to 4 bytes in nbpf_prep_one()
+ * for DMA_MEM_TO_MEM
+ */
+
+ /* Fields compulsory for DMA_SLAVE */
+ dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
+ dma_dev->device_control = nbpf_control;
+
+ platform_set_drvdata(pdev, nbpf);
+
+ ret = clk_prepare_enable(nbpf->clk);
+ if (ret < 0)
+ return ret;
+
+ nbpf_configure(nbpf);
+
+ ret = dma_async_device_register(dma_dev);
+ if (ret < 0)
+ goto e_clk_off;
+
+ ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
+ if (ret < 0)
+ goto e_dma_dev_unreg;
+
+ return 0;
+
+e_dma_dev_unreg:
+ dma_async_device_unregister(dma_dev);
+e_clk_off:
+ clk_disable_unprepare(nbpf->clk);
+
+ return ret;
+}
+
+static int nbpf_remove(struct platform_device *pdev)
+{
+ struct nbpf_device *nbpf = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&nbpf->dma_dev);
+ clk_disable_unprepare(nbpf->clk);
+
+ return 0;
+}
+
+static struct platform_device_id nbpf_ids[] = {
+ {"nbpfaxi64dmac1b4", (kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
+ {"nbpfaxi64dmac1b8", (kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
+ {"nbpfaxi64dmac1b16", (kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
+ {"nbpfaxi64dmac4b4", (kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
+ {"nbpfaxi64dmac4b8", (kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
+ {"nbpfaxi64dmac4b16", (kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
+ {"nbpfaxi64dmac8b4", (kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
+ {"nbpfaxi64dmac8b8", (kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
+ {"nbpfaxi64dmac8b16", (kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
+ {},
+};
+MODULE_DEVICE_TABLE(platform, nbpf_ids);
+
+#ifdef CONFIG_PM_RUNTIME
+static int nbpf_runtime_suspend(struct device *dev)
+{
+ struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+ clk_disable_unprepare(nbpf->clk);
+ return 0;
+}
+
+static int nbpf_runtime_resume(struct device *dev)
+{
+ struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+ return clk_prepare_enable(nbpf->clk);
+}
+#endif
+
+static const struct dev_pm_ops nbpf_pm_ops = {
+ SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
+};
+
+static struct platform_driver nbpf_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "dma-nbpf",
+ .of_match_table = nbpf_match,
+ .pm = &nbpf_pm_ops,
+ },
+ .id_table = nbpf_ids,
+ .probe = nbpf_probe,
+ .remove = nbpf_remove,
+};
+
+module_platform_driver(nbpf_driver);
+
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
+MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
+MODULE_LICENSE("GPL v2");
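
For context only (not part of the patch): a minimal, hypothetical dmaengine client sketch showing how a slave driver would exercise the paths added above -- dma_request_slave_channel() resolves through nbpf_of_xlate(), dmaengine_slave_config() lands in nbpf_control(DMA_SLAVE_CONFIG), and the prepared descriptor flows through nbpf_prep_slave_sg(), nbpf_tx_submit() and nbpf_issue_pending(). The channel name "tx", the FIFO address and the surrounding error handling are assumptions, not taken from this driver.

#include <linux/device.h>
#include <linux/dmaengine.h>

static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = 0xffd60034,			/* assumed device FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 16,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_slave_channel(dev, "tx");	/* resolved by nbpf_of_xlate() */
	if (!chan)
		return -ENODEV;

	if (dmaengine_slave_config(chan, &cfg)) {	/* nbpf_control(DMA_SLAVE_CONFIG) */
		dma_release_channel(chan);
		return -EINVAL;
	}

	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {					/* nbpf_prep_slave_sg() failed */
		dma_release_channel(chan);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);			/* nbpf_tx_submit() */
	dma_async_issue_pending(chan);			/* nbpf_issue_pending() */

	/* ... wait for the transfer to complete, then ... */
	dma_release_channel(chan);

	return dma_submit_error(cookie) ? -EIO : 0;
}
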
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index e8fe9dc455f4..d5fbeaa1e7ba 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -218,3 +218,38 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
&dma_spec->args[0]);
}
EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
+
+/**
+ * of_dma_xlate_by_chan_id - Translate dt property to DMA channel by channel id
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ * @ofdma: pointer to DMA controller data
+ *
+ * This function can be used as the of xlate callback for a DMA driver which
+ * wants to match the channel based on the channel id. When using this xlate
+ * function the #dma-cells property of the DMA controller dt node needs to be
+ * set to 1. The data parameter of of_dma_controller_register must be a
+ * pointer to the dma_device struct the function should match upon.
+ *
+ * Returns pointer to appropriate dma channel on success or NULL on error.
+ */
+struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dma_device *dev = ofdma->of_dma_data;
+ struct dma_chan *chan, *candidate = NULL;
+
+ if (!dev || dma_spec->args_count != 1)
+ return NULL;
+
+ list_for_each_entry(chan, &dev->channels, device_node)
+ if (chan->chan_id == dma_spec->args[0]) {
+ candidate = chan;
+ break;
+ }
+
+ if (!candidate)
+ return NULL;
+
+ return dma_get_slave_channel(candidate);
+}
+EXPORT_SYMBOL_GPL(of_dma_xlate_by_chan_id);
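
For context only (not part of the patch): a hypothetical controller-probe fragment showing how of_dma_xlate_by_chan_id() is meant to be wired up. The dma_device pointer is passed as the data argument of of_dma_controller_register(), matching the kerneldoc above, and the controller's DT node would carry #dma-cells = <1>. The example_* names are placeholders.

#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

static int example_dmac_register(struct platform_device *pdev,
				 struct dma_device *ddev)
{
	int ret;

	ret = dma_async_device_register(ddev);
	if (ret)
		return ret;

	/* ddev becomes ofdma->of_dma_data inside of_dma_xlate_by_chan_id() */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, ddev);
	if (ret)
		dma_async_device_unregister(ddev);

	return ret;
}
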
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index b19f04f4390b..4cf7d9a950d7 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -853,8 +853,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
- void *context)
+ size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
struct omap_dmadev *od = to_omap_dma_dev(chan->device);
struct omap_chan *c = to_omap_dma_chan(chan);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 73fa9b7a10ab..d5149aacd2fe 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -33,26 +33,15 @@
#define PL330_MAX_IRQS 32
#define PL330_MAX_PERI 32
-enum pl330_srccachectrl {
- SCCTRL0, /* Noncacheable and nonbufferable */
- SCCTRL1, /* Bufferable only */
- SCCTRL2, /* Cacheable, but do not allocate */
- SCCTRL3, /* Cacheable and bufferable, but do not allocate */
- SINVALID1,
- SINVALID2,
- SCCTRL6, /* Cacheable write-through, allocate on reads only */
- SCCTRL7, /* Cacheable write-back, allocate on reads only */
-};
-
-enum pl330_dstcachectrl {
- DCCTRL0, /* Noncacheable and nonbufferable */
- DCCTRL1, /* Bufferable only */
- DCCTRL2, /* Cacheable, but do not allocate */
- DCCTRL3, /* Cacheable and bufferable, but do not allocate */
- DINVALID1, /* AWCACHE = 0x1000 */
- DINVALID2,
- DCCTRL6, /* Cacheable write-through, allocate on writes only */
- DCCTRL7, /* Cacheable write-back, allocate on writes only */
+enum pl330_cachectrl {
+ CCTRL0, /* Noncacheable and nonbufferable */
+ CCTRL1, /* Bufferable only */
+ CCTRL2, /* Cacheable, but do not allocate */
+ CCTRL3, /* Cacheable and bufferable, but do not allocate */
+ INVALID1, /* AWCACHE = 0x1000 */
+ INVALID2,
+ CCTRL6, /* Cacheable write-through, allocate on writes only */
+ CCTRL7, /* Cacheable write-back, allocate on writes only */
};
enum pl330_byteswap {
@@ -63,13 +52,6 @@ enum pl330_byteswap {
SWAP_16,
};
-enum pl330_reqtype {
- MEMTOMEM,
- MEMTODEV,
- DEVTOMEM,
- DEVTODEV,
-};
-
/* Register and Bit field Definitions */
#define DS 0x0
#define DS_ST_STOP 0x0
@@ -263,9 +245,6 @@ enum pl330_reqtype {
*/
#define MCODE_BUFF_PER_REQ 256
-/* If the _pl330_req is available to the client */
-#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
-
/* Use this _only_ to wait on transient states */
#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax();
@@ -300,27 +279,6 @@ struct pl330_config {
u32 irq_ns;
};
-/* Handle to the DMAC provided to the PL330 core */
-struct pl330_info {
- /* Owning device */
- struct device *dev;
- /* Size of MicroCode buffers for each channel. */
- unsigned mcbufsz;
- /* ioremap'ed address of PL330 registers. */
- void __iomem *base;
- /* Client can freely use it. */
- void *client_data;
- /* PL330 core data, Client must not touch it. */
- void *pl330_data;
- /* Populated by the PL330 core driver during pl330_add */
- struct pl330_config pcfg;
- /*
- * If the DMAC has some reset mechanism, then the
- * client may want to provide pointer to the method.
- */
- void (*dmac_reset)(struct pl330_info *pi);
-};
-
/**
* Request Configuration.
* The PL330 core does not modify this and uses the last
@@ -344,8 +302,8 @@ struct pl330_reqcfg {
unsigned brst_len:5;
unsigned brst_size:3; /* in power of 2 */
- enum pl330_dstcachectrl dcctl;
- enum pl330_srccachectrl scctl;
+ enum pl330_cachectrl dcctl;
+ enum pl330_cachectrl scctl;
enum pl330_byteswap swap;
struct pl330_config *pcfg;
};
@@ -359,11 +317,6 @@ struct pl330_xfer {
u32 dst_addr;
/* Size to xfer */
u32 bytes;
- /*
- * Pointer to next xfer in the list.
- * The last xfer in the req must point to NULL.
- */
- struct pl330_xfer *next;
};
/* The xfer callbacks are made with one of these arguments. */
@@ -376,67 +329,6 @@ enum pl330_op_err {
PL330_ERR_FAIL,
};
-/* A request defining Scatter-Gather List ending with NULL xfer. */
-struct pl330_req {
- enum pl330_reqtype rqtype;
- /* Index of peripheral for the xfer. */
- unsigned peri:5;
- /* Unique token for this xfer, set by the client. */
- void *token;
- /* Callback to be called after xfer. */
- void (*xfer_cb)(void *token, enum pl330_op_err err);
- /* If NULL, req will be done at last set parameters. */
- struct pl330_reqcfg *cfg;
- /* Pointer to first xfer in the request. */
- struct pl330_xfer *x;
- /* Hook to attach to DMAC's list of reqs with due callback */
- struct list_head rqd;
-};
-
-/*
- * To know the status of the channel and DMAC, the client
- * provides a pointer to this structure. The PL330 core
- * fills it with current information.
- */
-struct pl330_chanstatus {
- /*
- * If the DMAC engine halted due to some error,
- * the client should remove-add DMAC.
- */
- bool dmac_halted;
- /*
- * If channel is halted due to some error,
- * the client should ABORT/FLUSH and START the channel.
- */
- bool faulting;
- /* Location of last load */
- u32 src_addr;
- /* Location of last store */
- u32 dst_addr;
- /*
- * Pointer to the currently active req, NULL if channel is
- * inactive, even though the requests may be present.
- */
- struct pl330_req *top_req;
- /* Pointer to req waiting second in the queue if any. */
- struct pl330_req *wait_req;
-};
-
-enum pl330_chan_op {
- /* Start the channel */
- PL330_OP_START,
- /* Abort the active xfer */
- PL330_OP_ABORT,
- /* Stop xfer and flush queue */
- PL330_OP_FLUSH,
-};
-
-struct _xfer_spec {
- u32 ccr;
- struct pl330_req *r;
- struct pl330_xfer *x;
-};
-
enum dmamov_dst {
SAR = 0,
CCR,
@@ -454,12 +346,12 @@ enum pl330_cond {
ALWAYS,
};
+struct dma_pl330_desc;
+
struct _pl330_req {
u32 mc_bus;
void *mc_cpu;
- /* Number of bytes taken to setup MC for the req */
- u32 mc_len;
- struct pl330_req *r;
+ struct dma_pl330_desc *desc;
};
/* ToBeDone for tasklet */
@@ -491,30 +383,6 @@ enum pl330_dmac_state {
DYING,
};
-/* A DMAC */
-struct pl330_dmac {
- spinlock_t lock;
- /* Holds list of reqs with due callbacks */
- struct list_head req_done;
- /* Pointer to platform specific stuff */
- struct pl330_info *pinfo;
- /* Maximum possible events/irqs */
- int events[32];
- /* BUS address of MicroCode buffer */
- dma_addr_t mcode_bus;
- /* CPU address of MicroCode buffer */
- void *mcode_cpu;
- /* List of all Channel threads */
- struct pl330_thread *channels;
- /* Pointer to the MANAGER thread */
- struct pl330_thread *manager;
- /* To handle bad news in interrupt */
- struct tasklet_struct tasks;
- struct _pl330_tbd dmac_tbd;
- /* State of DMAC operation */
- enum pl330_dmac_state state;
-};
-
enum desc_status {
/* In the DMAC pool */
FREE,
@@ -555,15 +423,16 @@ struct dma_pl330_chan {
* As the parent, this DMAC also provides descriptors
* to the channel.
*/
- struct dma_pl330_dmac *dmac;
+ struct pl330_dmac *dmac;
/* To protect channel manipulation */
spinlock_t lock;
- /* Token of a hardware channel thread of PL330 DMAC
- * NULL if the channel is available to be acquired.
+ /*
+ * Hardware channel thread of PL330 DMAC. NULL if the channel is
+ * available.
*/
- void *pl330_chid;
+ struct pl330_thread *thread;
/* For D-to-M and M-to-D channels */
int burst_sz; /* the peripheral fifo width */
@@ -574,9 +443,7 @@ struct dma_pl330_chan {
bool cyclic;
};
-struct dma_pl330_dmac {
- struct pl330_info pif;
-
+struct pl330_dmac {
/* DMA-Engine Device */
struct dma_device ddma;
@@ -588,6 +455,32 @@ struct dma_pl330_dmac {
/* To protect desc_pool manipulation */
spinlock_t pool_lock;
+ /* Size of MicroCode buffers for each channel. */
+ unsigned mcbufsz;
+ /* ioremap'ed address of PL330 registers. */
+ void __iomem *base;
+ /* Populated by the PL330 core driver during pl330_add */
+ struct pl330_config pcfg;
+
+ spinlock_t lock;
+ /* Maximum possible events/irqs */
+ int events[32];
+ /* BUS address of MicroCode buffer */
+ dma_addr_t mcode_bus;
+ /* CPU address of MicroCode buffer */
+ void *mcode_cpu;
+ /* List of all Channel threads */
+ struct pl330_thread *channels;
+ /* Pointer to the MANAGER thread */
+ struct pl330_thread *manager;
+ /* To handle bad news in interrupt */
+ struct tasklet_struct tasks;
+ struct _pl330_tbd dmac_tbd;
+ /* State of DMAC operation */
+ enum pl330_dmac_state state;
+ /* Holds list of reqs with due callbacks */
+ struct list_head req_done;
+
/* Peripheral channels connected to this DMAC */
unsigned int num_peripherals;
struct dma_pl330_chan *peripherals; /* keep at end */
@@ -604,49 +497,43 @@ struct dma_pl330_desc {
struct pl330_xfer px;
struct pl330_reqcfg rqcfg;
- struct pl330_req req;
enum desc_status status;
/* The channel which currently holds this desc */
struct dma_pl330_chan *pchan;
+
+ enum dma_transfer_direction rqtype;
+ /* Index of peripheral for the xfer. */
+ unsigned peri:5;
+ /* Hook to attach to DMAC's list of reqs with due callback */
+ struct list_head rqd;
};
-static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
-{
- if (r && r->xfer_cb)
- r->xfer_cb(r->token, err);
-}
+struct _xfer_spec {
+ u32 ccr;
+ struct dma_pl330_desc *desc;
+};
static inline bool _queue_empty(struct pl330_thread *thrd)
{
- return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
- ? true : false;
+ return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL;
}
static inline bool _queue_full(struct pl330_thread *thrd)
{
- return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
- ? false : true;
+ return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}
static inline bool is_manager(struct pl330_thread *thrd)
{
- struct pl330_dmac *pl330 = thrd->dmac;
-
- /* MANAGER is indexed at the end */
- if (thrd->id == pl330->pinfo->pcfg.num_chan)
- return true;
- else
- return false;
+ return thrd->dmac->manager == thrd;
}
/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
- struct pl330_dmac *pl330 = thrd->dmac;
-
- return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
+ return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}
static inline u32 get_revision(u32 periph_id)
@@ -1004,7 +891,7 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
unsigned long loops = msecs_to_loops(5);
do {
@@ -1024,7 +911,7 @@ static bool _until_dmac_idle(struct pl330_thread *thrd)
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
u8 insn[], bool as_manager)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
u32 val;
val = (insn[0] << 16) | (insn[1] << 24);
@@ -1039,7 +926,7 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
/* If timed out due to halted state-machine */
if (_until_dmac_idle(thrd)) {
- dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
+ dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");
return;
}
@@ -1047,25 +934,9 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
writel(0, regs + DBGCMD);
}
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-static void mark_free(struct pl330_thread *thrd, int idx)
-{
- struct _pl330_req *req = &thrd->req[idx];
-
- _emit_END(0, req->mc_cpu);
- req->mc_len = 0;
-
- thrd->req_running = -1;
-}
-
static inline u32 _state(struct pl330_thread *thrd)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
u32 val;
if (is_manager(thrd))
@@ -1123,7 +994,7 @@ static inline u32 _state(struct pl330_thread *thrd)
static void _stop(struct pl330_thread *thrd)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
u8 insn[6] = {0, 0, 0, 0, 0, 0};
if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
@@ -1146,9 +1017,9 @@ static void _stop(struct pl330_thread *thrd)
/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
- void __iomem *regs = thrd->dmac->pinfo->base;
+ void __iomem *regs = thrd->dmac->base;
struct _pl330_req *req;
- struct pl330_req *r;
+ struct dma_pl330_desc *desc;
struct _arg_GO go;
unsigned ns;
u8 insn[6] = {0, 0, 0, 0, 0, 0};
@@ -1159,32 +1030,27 @@ static bool _trigger(struct pl330_thread *thrd)
return true;
idx = 1 - thrd->lstenq;
- if (!IS_FREE(&thrd->req[idx]))
+ if (thrd->req[idx].desc != NULL) {
req = &thrd->req[idx];
- else {
+ } else {
idx = thrd->lstenq;
- if (!IS_FREE(&thrd->req[idx]))
+ if (thrd->req[idx].desc != NULL)
req = &thrd->req[idx];
else
req = NULL;
}
/* Return if no request */
- if (!req || !req->r)
+ if (!req)
return true;
- r = req->r;
+ desc = req->desc;
- if (r->cfg)
- ns = r->cfg->nonsecure ? 1 : 0;
- else if (readl(regs + CS(thrd->id)) & CS_CNS)
- ns = 1;
- else
- ns = 0;
+ ns = desc->rqcfg.nonsecure ? 1 : 0;
/* See 'Abort Sources' point-4 at Page 2-25 */
if (_manager_ns(thrd) && !ns)
- dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
+ dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
__func__, __LINE__);
go.chan = thrd->id;
@@ -1240,7 +1106,7 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs, int cyc)
{
int off = 0;
- struct pl330_config *pcfg = pxs->r->cfg->pcfg;
+ struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;
/* check lock-up free version */
if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
@@ -1266,10 +1132,10 @@ static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
int off = 0;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
- off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
off += _emit_ST(dry_run, &buf[off], ALWAYS);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
+ off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
}
return off;
@@ -1281,10 +1147,10 @@ static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
int off = 0;
while (cyc--) {
- off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
+ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
off += _emit_LD(dry_run, &buf[off], ALWAYS);
- off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
- off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
+ off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+ off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
}
return off;
@@ -1295,14 +1161,14 @@ static int _bursts(unsigned dry_run, u8 buf[],
{
int off = 0;
- switch (pxs->r->rqtype) {
- case MEMTODEV:
+ switch (pxs->desc->rqtype) {
+ case DMA_MEM_TO_DEV:
off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
break;
- case DEVTOMEM:
+ case DMA_DEV_TO_MEM:
off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
break;
- case MEMTOMEM:
+ case DMA_MEM_TO_MEM:
off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
break;
default:
@@ -1395,7 +1261,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
static inline int _setup_loops(unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs)
{
- struct pl330_xfer *x = pxs->x;
+ struct pl330_xfer *x = &pxs->desc->px;
u32 ccr = pxs->ccr;
unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
int off = 0;
@@ -1412,7 +1278,7 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[],
static inline int _setup_xfer(unsigned dry_run, u8 buf[],
const struct _xfer_spec *pxs)
{
- struct pl330_xfer *x = pxs->x;
+ struct pl330_xfer *x = &pxs->desc->px;
int off = 0;
/* DMAMOV SAR, x->src_addr */
@@ -1443,17 +1309,12 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
/* DMAMOV CCR, ccr */
off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);
- x = pxs->r->x;
- do {
- /* Error if xfer length is not aligned at burst size */
- if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
- return -EINVAL;
-
- pxs->x = x;
- off += _setup_xfer(dry_run, &buf[off], pxs);
+ x = &pxs->desc->px;
+ /* Error if xfer length is not aligned at burst size */
+ if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
+ return -EINVAL;
- x = x->next;
- } while (x);
+ off += _setup_xfer(dry_run, &buf[off], pxs);
/* DMASEV peripheral/event */
off += _emit_SEV(dry_run, &buf[off], thrd->ev);
@@ -1495,31 +1356,15 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
return ccr;
}
-static inline bool _is_valid(u32 ccr)
-{
- enum pl330_dstcachectrl dcctl;
- enum pl330_srccachectrl scctl;
-
- dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
- scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;
-
- if (dcctl == DINVALID1 || dcctl == DINVALID2
- || scctl == SINVALID1 || scctl == SINVALID2)
- return false;
- else
- return true;
-}
-
/*
* Submit a list of xfers after which the client wants notification.
* Client is not notified after each xfer unit, just once after all
* xfer units are done or some error occurs.
*/
-static int pl330_submit_req(void *ch_id, struct pl330_req *r)
+static int pl330_submit_req(struct pl330_thread *thrd,
+ struct dma_pl330_desc *desc)
{
- struct pl330_thread *thrd = ch_id;
- struct pl330_dmac *pl330;
- struct pl330_info *pi;
+ struct pl330_dmac *pl330 = thrd->dmac;
struct _xfer_spec xs;
unsigned long flags;
void __iomem *regs;
@@ -1528,25 +1373,24 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r)
int ret = 0;
/* No Req or Unacquired Channel or DMAC */
- if (!r || !thrd || thrd->free)
+ if (!desc || !thrd || thrd->free)
return -EINVAL;
- pl330 = thrd->dmac;
- pi = pl330->pinfo;
- regs = pi->base;
+ regs = thrd->dmac->base;
if (pl330->state == DYING
|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
- dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
+ dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
__func__, __LINE__);
return -EAGAIN;
}
/* If request for non-existing peripheral */
- if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
- dev_info(thrd->dmac->pinfo->dev,
+ if (desc->rqtype != DMA_MEM_TO_MEM &&
+ desc->peri >= pl330->pcfg.num_peri) {
+ dev_info(thrd->dmac->ddma.dev,
"%s:%d Invalid peripheral(%u)!\n",
- __func__, __LINE__, r->peri);
+ __func__, __LINE__, desc->peri);
return -EINVAL;
}
@@ -1557,41 +1401,26 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r)
goto xfer_exit;
}
+ /* Prefer Secure Channel */
+ if (!_manager_ns(thrd))
+ desc->rqcfg.nonsecure = 0;
+ else
+ desc->rqcfg.nonsecure = 1;
- /* Use last settings, if not provided */
- if (r->cfg) {
- /* Prefer Secure Channel */
- if (!_manager_ns(thrd))
- r->cfg->nonsecure = 0;
- else
- r->cfg->nonsecure = 1;
-
- ccr = _prepare_ccr(r->cfg);
- } else {
- ccr = readl(regs + CC(thrd->id));
- }
-
- /* If this req doesn't have valid xfer settings */
- if (!_is_valid(ccr)) {
- ret = -EINVAL;
- dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
- __func__, __LINE__, ccr);
- goto xfer_exit;
- }
+ ccr = _prepare_ccr(&desc->rqcfg);
- idx = IS_FREE(&thrd->req[0]) ? 0 : 1;
+ idx = thrd->req[0].desc == NULL ? 0 : 1;
xs.ccr = ccr;
- xs.r = r;
+ xs.desc = desc;
/* First dry run to check if req is acceptable */
ret = _setup_req(1, thrd, idx, &xs);
if (ret < 0)
goto xfer_exit;
- if (ret > pi->mcbufsz / 2) {
- dev_info(thrd->dmac->pinfo->dev,
- "%s:%d Trying increasing mcbufsz\n",
+ if (ret > pl330->mcbufsz / 2) {
+ dev_info(pl330->ddma.dev, "%s:%d Trying increasing mcbufsz\n",
__func__, __LINE__);
ret = -ENOMEM;
goto xfer_exit;
@@ -1599,8 +1428,8 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r)
/* Hook the request */
thrd->lstenq = idx;
- thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
- thrd->req[idx].r = r;
+ thrd->req[idx].desc = desc;
+ _setup_req(0, thrd, idx, &xs);
ret = 0;
@@ -1610,10 +1439,32 @@ xfer_exit:
return ret;
}
+static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
+{
+ struct dma_pl330_chan *pch;
+ unsigned long flags;
+
+ if (!desc)
+ return;
+
+ pch = desc->pchan;
+
+ /* If desc aborted */
+ if (!pch)
+ return;
+
+ spin_lock_irqsave(&pch->lock, flags);
+
+ desc->status = DONE;
+
+ spin_unlock_irqrestore(&pch->lock, flags);
+
+ tasklet_schedule(&pch->task);
+}
+
static void pl330_dotask(unsigned long data)
{
struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
- struct pl330_info *pi = pl330->pinfo;
unsigned long flags;
int i;
@@ -1631,16 +1482,16 @@ static void pl330_dotask(unsigned long data)
if (pl330->dmac_tbd.reset_mngr) {
_stop(pl330->manager);
/* Reset all channels */
- pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
+ pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
/* Clear the reset flag */
pl330->dmac_tbd.reset_mngr = false;
}
- for (i = 0; i < pi->pcfg.num_chan; i++) {
+ for (i = 0; i < pl330->pcfg.num_chan; i++) {
if (pl330->dmac_tbd.reset_chan & (1 << i)) {
struct pl330_thread *thrd = &pl330->channels[i];
- void __iomem *regs = pi->base;
+ void __iomem *regs = pl330->base;
enum pl330_op_err err;
_stop(thrd);
@@ -1651,16 +1502,13 @@ static void pl330_dotask(unsigned long data)
err = PL330_ERR_ABORT;
spin_unlock_irqrestore(&pl330->lock, flags);
-
- _callback(thrd->req[1 - thrd->lstenq].r, err);
- _callback(thrd->req[thrd->lstenq].r, err);
-
+ dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
+ dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
spin_lock_irqsave(&pl330->lock, flags);
- thrd->req[0].r = NULL;
- thrd->req[1].r = NULL;
- mark_free(thrd, 0);
- mark_free(thrd, 1);
+ thrd->req[0].desc = NULL;
+ thrd->req[1].desc = NULL;
+ thrd->req_running = -1;
/* Clear the reset flag */
pl330->dmac_tbd.reset_chan &= ~(1 << i);
@@ -1673,20 +1521,15 @@ static void pl330_dotask(unsigned long data)
}
/* Returns 1 if state was updated, 0 otherwise */
-static int pl330_update(const struct pl330_info *pi)
+static int pl330_update(struct pl330_dmac *pl330)
{
- struct pl330_req *rqdone, *tmp;
- struct pl330_dmac *pl330;
+ struct dma_pl330_desc *descdone, *tmp;
unsigned long flags;
void __iomem *regs;
u32 val;
int id, ev, ret = 0;
- if (!pi || !pi->pl330_data)
- return 0;
-
- regs = pi->base;
- pl330 = pi->pl330_data;
+ regs = pl330->base;
spin_lock_irqsave(&pl330->lock, flags);
@@ -1696,13 +1539,13 @@ static int pl330_update(const struct pl330_info *pi)
else
pl330->dmac_tbd.reset_mngr = false;
- val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
+ val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
pl330->dmac_tbd.reset_chan |= val;
if (val) {
int i = 0;
- while (i < pi->pcfg.num_chan) {
+ while (i < pl330->pcfg.num_chan) {
if (val & (1 << i)) {
- dev_info(pi->dev,
+ dev_info(pl330->ddma.dev,
"Reset Channel-%d\t CS-%x FTC-%x\n",
i, readl(regs + CS(i)),
readl(regs + FTC(i)));
@@ -1714,15 +1557,16 @@ static int pl330_update(const struct pl330_info *pi)
/* Check which event happened i.e, thread notified */
val = readl(regs + ES);
- if (pi->pcfg.num_events < 32
- && val & ~((1 << pi->pcfg.num_events) - 1)) {
+ if (pl330->pcfg.num_events < 32
+ && val & ~((1 << pl330->pcfg.num_events) - 1)) {
pl330->dmac_tbd.reset_dmac = true;
- dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
+ dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,
+ __LINE__);
ret = 1;
goto updt_exit;
}
- for (ev = 0; ev < pi->pcfg.num_events; ev++) {
+ for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
if (val & (1 << ev)) { /* Event occurred */
struct pl330_thread *thrd;
u32 inten = readl(regs + INTEN);
@@ -1743,25 +1587,22 @@ static int pl330_update(const struct pl330_info *pi)
continue;
/* Detach the req */
- rqdone = thrd->req[active].r;
- thrd->req[active].r = NULL;
-
- mark_free(thrd, active);
+ descdone = thrd->req[active].desc;
+ thrd->req[active].desc = NULL;
/* Get going again ASAP */
_start(thrd);
/* For now, just make a list of callbacks to be done */
- list_add_tail(&rqdone->rqd, &pl330->req_done);
+ list_add_tail(&descdone->rqd, &pl330->req_done);
}
}
/* Now that we are in no hurry, do the callbacks */
- list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
- list_del(&rqdone->rqd);
-
+ list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) {
+ list_del(&descdone->rqd);
spin_unlock_irqrestore(&pl330->lock, flags);
- _callback(rqdone, PL330_ERR_NONE);
+ dma_pl330_rqcb(descdone, PL330_ERR_NONE);
spin_lock_irqsave(&pl330->lock, flags);
}
@@ -1778,65 +1619,13 @@ updt_exit:
return ret;
}
-static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
-{
- struct pl330_thread *thrd = ch_id;
- struct pl330_dmac *pl330;
- unsigned long flags;
- int ret = 0, active;
-
- if (!thrd || thrd->free || thrd->dmac->state == DYING)
- return -EINVAL;
-
- pl330 = thrd->dmac;
- active = thrd->req_running;
-
- spin_lock_irqsave(&pl330->lock, flags);
-
- switch (op) {
- case PL330_OP_FLUSH:
- /* Make sure the channel is stopped */
- _stop(thrd);
-
- thrd->req[0].r = NULL;
- thrd->req[1].r = NULL;
- mark_free(thrd, 0);
- mark_free(thrd, 1);
- break;
-
- case PL330_OP_ABORT:
- /* Make sure the channel is stopped */
- _stop(thrd);
-
- /* ABORT is only for the active req */
- if (active == -1)
- break;
-
- thrd->req[active].r = NULL;
- mark_free(thrd, active);
-
- /* Start the next */
- case PL330_OP_START:
- if ((active == -1) && !_start(thrd))
- ret = -EIO;
- break;
-
- default:
- ret = -EINVAL;
- }
-
- spin_unlock_irqrestore(&pl330->lock, flags);
- return ret;
-}
-
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
struct pl330_dmac *pl330 = thrd->dmac;
- struct pl330_info *pi = pl330->pinfo;
int ev;
- for (ev = 0; ev < pi->pcfg.num_events; ev++)
+ for (ev = 0; ev < pl330->pcfg.num_events; ev++)
if (pl330->events[ev] == -1) {
pl330->events[ev] = thrd->id;
return ev;
@@ -1845,45 +1634,38 @@ static inline int _alloc_event(struct pl330_thread *thrd)
return -1;
}
-static bool _chan_ns(const struct pl330_info *pi, int i)
+static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
- return pi->pcfg.irq_ns & (1 << i);
+ return pl330->pcfg.irq_ns & (1 << i);
}
/* Upon success, returns IdentityToken for the
* allocated channel, NULL otherwise.
*/
-static void *pl330_request_channel(const struct pl330_info *pi)
+static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
struct pl330_thread *thrd = NULL;
- struct pl330_dmac *pl330;
unsigned long flags;
int chans, i;
- if (!pi || !pi->pl330_data)
- return NULL;
-
- pl330 = pi->pl330_data;
-
if (pl330->state == DYING)
return NULL;
- chans = pi->pcfg.num_chan;
+ chans = pl330->pcfg.num_chan;
spin_lock_irqsave(&pl330->lock, flags);
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
if ((thrd->free) && (!_manager_ns(thrd) ||
- _chan_ns(pi, i))) {
+ _chan_ns(pl330, i))) {
thrd->ev = _alloc_event(thrd);
if (thrd->ev >= 0) {
thrd->free = false;
thrd->lstenq = 1;
- thrd->req[0].r = NULL;
- mark_free(thrd, 0);
- thrd->req[1].r = NULL;
- mark_free(thrd, 1);
+ thrd->req[0].desc = NULL;
+ thrd->req[1].desc = NULL;
+ thrd->req_running = -1;
break;
}
}
@@ -1899,17 +1681,15 @@ static void *pl330_request_channel(const struct pl330_info *pi)
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
struct pl330_dmac *pl330 = thrd->dmac;
- struct pl330_info *pi = pl330->pinfo;
/* If the event is valid and was held by the thread */
- if (ev >= 0 && ev < pi->pcfg.num_events
+ if (ev >= 0 && ev < pl330->pcfg.num_events
&& pl330->events[ev] == thrd->id)
pl330->events[ev] = -1;
}
-static void pl330_release_channel(void *ch_id)
+static void pl330_release_channel(struct pl330_thread *thrd)
{
- struct pl330_thread *thrd = ch_id;
struct pl330_dmac *pl330;
unsigned long flags;
@@ -1918,8 +1698,8 @@ static void pl330_release_channel(void *ch_id)
_stop(thrd);
- _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
- _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);
+ dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
+ dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);
pl330 = thrd->dmac;
@@ -1932,72 +1712,70 @@ static void pl330_release_channel(void *ch_id)
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
*/
-static void read_dmac_config(struct pl330_info *pi)
+static void read_dmac_config(struct pl330_dmac *pl330)
{
- void __iomem *regs = pi->base;
+ void __iomem *regs = pl330->base;
u32 val;
val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
val &= CRD_DATA_WIDTH_MASK;
- pi->pcfg.data_bus_width = 8 * (1 << val);
+ pl330->pcfg.data_bus_width = 8 * (1 << val);
val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
val &= CRD_DATA_BUFF_MASK;
- pi->pcfg.data_buf_dep = val + 1;
+ pl330->pcfg.data_buf_dep = val + 1;
val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
val &= CR0_NUM_CHANS_MASK;
val += 1;
- pi->pcfg.num_chan = val;
+ pl330->pcfg.num_chan = val;
val = readl(regs + CR0);
if (val & CR0_PERIPH_REQ_SET) {
val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
val += 1;
- pi->pcfg.num_peri = val;
- pi->pcfg.peri_ns = readl(regs + CR4);
+ pl330->pcfg.num_peri = val;
+ pl330->pcfg.peri_ns = readl(regs + CR4);
} else {
- pi->pcfg.num_peri = 0;
+ pl330->pcfg.num_peri = 0;
}
val = readl(regs + CR0);
if (val & CR0_BOOT_MAN_NS)
- pi->pcfg.mode |= DMAC_MODE_NS;
+ pl330->pcfg.mode |= DMAC_MODE_NS;
else
- pi->pcfg.mode &= ~DMAC_MODE_NS;
+ pl330->pcfg.mode &= ~DMAC_MODE_NS;
val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
val &= CR0_NUM_EVENTS_MASK;
val += 1;
- pi->pcfg.num_events = val;
+ pl330->pcfg.num_events = val;
- pi->pcfg.irq_ns = readl(regs + CR3);
+ pl330->pcfg.irq_ns = readl(regs + CR3);
}
static inline void _reset_thread(struct pl330_thread *thrd)
{
struct pl330_dmac *pl330 = thrd->dmac;
- struct pl330_info *pi = pl330->pinfo;
thrd->req[0].mc_cpu = pl330->mcode_cpu
- + (thrd->id * pi->mcbufsz);
+ + (thrd->id * pl330->mcbufsz);
thrd->req[0].mc_bus = pl330->mcode_bus
- + (thrd->id * pi->mcbufsz);
- thrd->req[0].r = NULL;
- mark_free(thrd, 0);
+ + (thrd->id * pl330->mcbufsz);
+ thrd->req[0].desc = NULL;
thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
- + pi->mcbufsz / 2;
+ + pl330->mcbufsz / 2;
thrd->req[1].mc_bus = thrd->req[0].mc_bus
- + pi->mcbufsz / 2;
- thrd->req[1].r = NULL;
- mark_free(thrd, 1);
+ + pl330->mcbufsz / 2;
+ thrd->req[1].desc = NULL;
+
+ thrd->req_running = -1;
}
static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
+ int chans = pl330->pcfg.num_chan;
struct pl330_thread *thrd;
int i;
@@ -2028,29 +1806,28 @@ static int dmac_alloc_threads(struct pl330_dmac *pl330)
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
+ int chans = pl330->pcfg.num_chan;
int ret;
/*
* Alloc MicroCode buffer for 'chans' Channel threads.
* A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
*/
- pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
- chans * pi->mcbufsz,
+ pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
+ chans * pl330->mcbufsz,
&pl330->mcode_bus, GFP_KERNEL);
if (!pl330->mcode_cpu) {
- dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
+ dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
__func__, __LINE__);
return -ENOMEM;
}
ret = dmac_alloc_threads(pl330);
if (ret) {
- dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n",
+ dev_err(pl330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n",
__func__, __LINE__);
- dma_free_coherent(pi->dev,
- chans * pi->mcbufsz,
+ dma_free_coherent(pl330->ddma.dev,
+ chans * pl330->mcbufsz,
pl330->mcode_cpu, pl330->mcode_bus);
return ret;
}
@@ -2058,71 +1835,45 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
return 0;
}
-static int pl330_add(struct pl330_info *pi)
+static int pl330_add(struct pl330_dmac *pl330)
{
- struct pl330_dmac *pl330;
void __iomem *regs;
int i, ret;
- if (!pi || !pi->dev)
- return -EINVAL;
-
- /* If already added */
- if (pi->pl330_data)
- return -EINVAL;
-
- /*
- * If the SoC can perform reset on the DMAC, then do it
- * before reading its configuration.
- */
- if (pi->dmac_reset)
- pi->dmac_reset(pi);
-
- regs = pi->base;
+ regs = pl330->base;
/* Check if we can handle this DMAC */
- if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
- dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
+ if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
+ dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
+ pl330->pcfg.periph_id);
return -EINVAL;
}
/* Read the configuration of the DMAC */
- read_dmac_config(pi);
+ read_dmac_config(pl330);
- if (pi->pcfg.num_events == 0) {
- dev_err(pi->dev, "%s:%d Can't work without events!\n",
+ if (pl330->pcfg.num_events == 0) {
+ dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
__func__, __LINE__);
return -EINVAL;
}
- pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
- if (!pl330) {
- dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- /* Assign the info structure and private data */
- pl330->pinfo = pi;
- pi->pl330_data = pl330;
-
spin_lock_init(&pl330->lock);
INIT_LIST_HEAD(&pl330->req_done);
/* Use default MC buffer size if not provided */
- if (!pi->mcbufsz)
- pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;
+ if (!pl330->mcbufsz)
+ pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;
/* Mark all events as free */
- for (i = 0; i < pi->pcfg.num_events; i++)
+ for (i = 0; i < pl330->pcfg.num_events; i++)
pl330->events[i] = -1;
/* Allocate resources needed by the DMAC */
ret = dmac_alloc_resources(pl330);
if (ret) {
- dev_err(pi->dev, "Unable to create channels for DMAC\n");
- kfree(pl330);
+ dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");
return ret;
}
@@ -2135,15 +1886,13 @@ static int pl330_add(struct pl330_info *pi)
static int dmac_free_threads(struct pl330_dmac *pl330)
{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
struct pl330_thread *thrd;
int i;
/* Release Channel threads */
- for (i = 0; i < chans; i++) {
+ for (i = 0; i < pl330->pcfg.num_chan; i++) {
thrd = &pl330->channels[i];
- pl330_release_channel((void *)thrd);
+ pl330_release_channel(thrd);
}
/* Free memory */
@@ -2152,35 +1901,18 @@ static int dmac_free_threads(struct pl330_dmac *pl330)
return 0;
}
-static void dmac_free_resources(struct pl330_dmac *pl330)
+static void pl330_del(struct pl330_dmac *pl330)
{
- struct pl330_info *pi = pl330->pinfo;
- int chans = pi->pcfg.num_chan;
-
- dmac_free_threads(pl330);
-
- dma_free_coherent(pi->dev, chans * pi->mcbufsz,
- pl330->mcode_cpu, pl330->mcode_bus);
-}
-
-static void pl330_del(struct pl330_info *pi)
-{
- struct pl330_dmac *pl330;
-
- if (!pi || !pi->pl330_data)
- return;
-
- pl330 = pi->pl330_data;
-
pl330->state = UNINIT;
tasklet_kill(&pl330->tasks);
/* Free DMAC resources */
- dmac_free_resources(pl330);
+ dmac_free_threads(pl330);
- kfree(pl330);
- pi->pl330_data = NULL;
+ dma_free_coherent(pl330->ddma.dev,
+ pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
+ pl330->mcode_bus);
}
/* forward declaration */
@@ -2212,8 +1944,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
if (desc->status == BUSY)
continue;
- ret = pl330_submit_req(pch->pl330_chid,
- &desc->req);
+ ret = pl330_submit_req(pch->thread, desc);
if (!ret) {
desc->status = BUSY;
} else if (ret == -EAGAIN) {
@@ -2222,7 +1953,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch)
} else {
/* Unacceptable request */
desc->status = DONE;
- dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
+ dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
__func__, __LINE__, desc->txd.cookie);
tasklet_schedule(&pch->task);
}
@@ -2249,7 +1980,9 @@ static void pl330_tasklet(unsigned long data)
fill_queue(pch);
/* Make sure the PL330 Channel thread is active */
- pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
+ spin_lock(&pch->thread->dmac->lock);
+ _start(pch->thread);
+ spin_unlock(&pch->thread->dmac->lock);
while (!list_empty(&pch->completed_list)) {
dma_async_tx_callback callback;
@@ -2280,25 +2013,6 @@ static void pl330_tasklet(unsigned long data)
spin_unlock_irqrestore(&pch->lock, flags);
}
-static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
-{
- struct dma_pl330_desc *desc = token;
- struct dma_pl330_chan *pch = desc->pchan;
- unsigned long flags;
-
- /* If desc aborted */
- if (!pch)
- return;
-
- spin_lock_irqsave(&pch->lock, flags);
-
- desc->status = DONE;
-
- spin_unlock_irqrestore(&pch->lock, flags);
-
- tasklet_schedule(&pch->task);
-}
-
bool pl330_filter(struct dma_chan *chan, void *param)
{
u8 *peri_id;
@@ -2315,23 +2029,26 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
int count = dma_spec->args_count;
- struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
+ struct pl330_dmac *pl330 = ofdma->of_dma_data;
unsigned int chan_id;
+ if (!pl330)
+ return NULL;
+
if (count != 1)
return NULL;
chan_id = dma_spec->args[0];
- if (chan_id >= pdmac->num_peripherals)
+ if (chan_id >= pl330->num_peripherals)
return NULL;
- return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
+ return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
}
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
spin_lock_irqsave(&pch->lock, flags);
@@ -2339,8 +2056,8 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
dma_cookie_init(chan);
pch->cyclic = false;
- pch->pl330_chid = pl330_request_channel(&pdmac->pif);
- if (!pch->pl330_chid) {
+ pch->thread = pl330_request_channel(pl330);
+ if (!pch->thread) {
spin_unlock_irqrestore(&pch->lock, flags);
return -ENOMEM;
}
@@ -2357,7 +2074,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
struct dma_pl330_chan *pch = to_pchan(chan);
struct dma_pl330_desc *desc;
unsigned long flags;
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
struct dma_slave_config *slave_config;
LIST_HEAD(list);
@@ -2365,8 +2082,13 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
case DMA_TERMINATE_ALL:
spin_lock_irqsave(&pch->lock, flags);
- /* FLUSH the PL330 Channel thread */
- pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+ spin_lock(&pl330->lock);
+ _stop(pch->thread);
+ spin_unlock(&pl330->lock);
+
+ pch->thread->req[0].desc = NULL;
+ pch->thread->req[1].desc = NULL;
+ pch->thread->req_running = -1;
/* Mark all desc done */
list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2384,9 +2106,9 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
dma_cookie_complete(&desc->txd);
}
- list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
- list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
- list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
+ list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
+ list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
+ list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
break;
case DMA_SLAVE_CONFIG:
@@ -2409,7 +2131,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
}
break;
default:
- dev_err(pch->dmac->pif.dev, "Not supported command.\n");
+ dev_err(pch->dmac->ddma.dev, "Not supported command.\n");
return -ENXIO;
}
@@ -2425,8 +2147,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&pch->lock, flags);
- pl330_release_channel(pch->pl330_chid);
- pch->pl330_chid = NULL;
+ pl330_release_channel(pch->thread);
+ pch->thread = NULL;
if (pch->cyclic)
list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
@@ -2489,57 +2211,46 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
static inline void _init_desc(struct dma_pl330_desc *desc)
{
- desc->req.x = &desc->px;
- desc->req.token = desc;
desc->rqcfg.swap = SWAP_NO;
- desc->rqcfg.scctl = SCCTRL0;
- desc->rqcfg.dcctl = DCCTRL0;
- desc->req.cfg = &desc->rqcfg;
- desc->req.xfer_cb = dma_pl330_rqcb;
+ desc->rqcfg.scctl = CCTRL0;
+ desc->rqcfg.dcctl = CCTRL0;
desc->txd.tx_submit = pl330_tx_submit;
INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
-static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
{
struct dma_pl330_desc *desc;
unsigned long flags;
int i;
- if (!pdmac)
- return 0;
-
desc = kcalloc(count, sizeof(*desc), flg);
if (!desc)
return 0;
- spin_lock_irqsave(&pdmac->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, flags);
for (i = 0; i < count; i++) {
_init_desc(&desc[i]);
- list_add_tail(&desc[i].node, &pdmac->desc_pool);
+ list_add_tail(&desc[i].node, &pl330->desc_pool);
}
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, flags);
return count;
}
-static struct dma_pl330_desc *
-pluck_desc(struct dma_pl330_dmac *pdmac)
+static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330)
{
struct dma_pl330_desc *desc = NULL;
unsigned long flags;
- if (!pdmac)
- return NULL;
-
- spin_lock_irqsave(&pdmac->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, flags);
- if (!list_empty(&pdmac->desc_pool)) {
- desc = list_entry(pdmac->desc_pool.next,
+ if (!list_empty(&pl330->desc_pool)) {
+ desc = list_entry(pl330->desc_pool.next,
struct dma_pl330_desc, node);
list_del_init(&desc->node);
@@ -2548,29 +2259,29 @@ pluck_desc(struct dma_pl330_dmac *pdmac)
desc->txd.callback = NULL;
}
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, flags);
return desc;
}
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
u8 *peri_id = pch->chan.private;
struct dma_pl330_desc *desc;
/* Pluck one desc from the pool of DMAC */
- desc = pluck_desc(pdmac);
+ desc = pluck_desc(pl330);
/* If the DMAC pool is empty, alloc new */
if (!desc) {
- if (!add_desc(pdmac, GFP_ATOMIC, 1))
+ if (!add_desc(pl330, GFP_ATOMIC, 1))
return NULL;
/* Try again */
- desc = pluck_desc(pdmac);
+ desc = pluck_desc(pl330);
if (!desc) {
- dev_err(pch->dmac->pif.dev,
+ dev_err(pch->dmac->ddma.dev,
"%s:%d ALERT!\n", __func__, __LINE__);
return NULL;
}
@@ -2581,8 +2292,8 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- desc->req.peri = peri_id ? pch->chan.chan_id : 0;
- desc->rqcfg.pcfg = &pch->dmac->pif.pcfg;
+ desc->peri = peri_id ? pch->chan.chan_id : 0;
+ desc->rqcfg.pcfg = &pch->dmac->pcfg;
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -2592,7 +2303,6 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
static inline void fill_px(struct pl330_xfer *px,
dma_addr_t dst, dma_addr_t src, size_t len)
{
- px->next = NULL;
px->bytes = len;
px->dst_addr = dst;
px->src_addr = src;
@@ -2605,7 +2315,7 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
struct dma_pl330_desc *desc = pl330_get_desc(pch);
if (!desc) {
- dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
return NULL;
}
@@ -2629,11 +2339,11 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
struct dma_pl330_chan *pch = desc->pchan;
- struct pl330_info *pi = &pch->dmac->pif;
+ struct pl330_dmac *pl330 = pch->dmac;
int burst_len;
- burst_len = pi->pcfg.data_bus_width / 8;
- burst_len *= pi->pcfg.data_buf_dep;
+ burst_len = pl330->pcfg.data_bus_width / 8;
+ burst_len *= pl330->pcfg.data_buf_dep;
burst_len >>= desc->rqcfg.brst_size;
/* src/dst_burst_len can't be more than 16 */
@@ -2652,11 +2362,11 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct dma_pl330_desc *desc = NULL, *first = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
unsigned int i;
dma_addr_t dst;
dma_addr_t src;
@@ -2665,7 +2375,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
return NULL;
if (!is_slave_direction(direction)) {
- dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+ dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
__func__, __LINE__);
return NULL;
}
@@ -2673,23 +2383,23 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
for (i = 0; i < len / period_len; i++) {
desc = pl330_get_desc(pch);
if (!desc) {
- dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
__func__, __LINE__);
if (!first)
return NULL;
- spin_lock_irqsave(&pdmac->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, flags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
struct dma_pl330_desc, node);
- list_move_tail(&desc->node, &pdmac->desc_pool);
+ list_move_tail(&desc->node, &pl330->desc_pool);
}
- list_move_tail(&first->node, &pdmac->desc_pool);
+ list_move_tail(&first->node, &pl330->desc_pool);
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, flags);
return NULL;
}
@@ -2698,14 +2408,12 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
case DMA_MEM_TO_DEV:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
- desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
case DMA_DEV_TO_MEM:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
- desc->req.rqtype = DEVTOMEM;
src = pch->fifo_addr;
dst = dma_addr;
break;
@@ -2713,6 +2421,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
break;
}
+ desc->rqtype = direction;
desc->rqcfg.brst_size = pch->burst_sz;
desc->rqcfg.brst_len = 1;
fill_px(&desc->px, dst, src, period_len);
@@ -2740,24 +2449,22 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct pl330_info *pi;
+ struct pl330_dmac *pl330 = pch->dmac;
int burst;
if (unlikely(!pch || !len))
return NULL;
- pi = &pch->dmac->pif;
-
desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
if (!desc)
return NULL;
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 1;
- desc->req.rqtype = MEMTOMEM;
+ desc->rqtype = DMA_MEM_TO_MEM;
/* Select max possible burst size */
- burst = pi->pcfg.data_bus_width / 8;
+ burst = pl330->pcfg.data_bus_width / 8;
while (burst > 1) {
if (!(len % burst))
@@ -2776,7 +2483,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
return &desc->txd;
}
-static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
+static void __pl330_giveback_desc(struct pl330_dmac *pl330,
struct dma_pl330_desc *first)
{
unsigned long flags;
@@ -2785,17 +2492,17 @@ static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
if (!first)
return;
- spin_lock_irqsave(&pdmac->pool_lock, flags);
+ spin_lock_irqsave(&pl330->pool_lock, flags);
while (!list_empty(&first->node)) {
desc = list_entry(first->node.next,
struct dma_pl330_desc, node);
- list_move_tail(&desc->node, &pdmac->desc_pool);
+ list_move_tail(&desc->node, &pl330->desc_pool);
}
- list_move_tail(&first->node, &pdmac->desc_pool);
+ list_move_tail(&first->node, &pl330->desc_pool);
- spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+ spin_unlock_irqrestore(&pl330->pool_lock, flags);
}
static struct dma_async_tx_descriptor *
@@ -2820,12 +2527,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
desc = pl330_get_desc(pch);
if (!desc) {
- struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct pl330_dmac *pl330 = pch->dmac;
- dev_err(pch->dmac->pif.dev,
+ dev_err(pch->dmac->ddma.dev,
"%s:%d Unable to fetch desc\n",
__func__, __LINE__);
- __pl330_giveback_desc(pdmac, first);
+ __pl330_giveback_desc(pl330, first);
return NULL;
}
@@ -2838,19 +2545,18 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (direction == DMA_MEM_TO_DEV) {
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
- desc->req.rqtype = MEMTODEV;
fill_px(&desc->px,
addr, sg_dma_address(sg), sg_dma_len(sg));
} else {
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
- desc->req.rqtype = DEVTOMEM;
fill_px(&desc->px,
sg_dma_address(sg), addr, sg_dma_len(sg));
}
desc->rqcfg.brst_size = pch->burst_sz;
desc->rqcfg.brst_len = 1;
+ desc->rqtype = direction;
}
/* Return the last desc in the chain */
@@ -2890,9 +2596,9 @@ static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
- struct dma_pl330_dmac *pdmac;
+ struct pl330_config *pcfg;
+ struct pl330_dmac *pl330;
struct dma_pl330_chan *pch, *_p;
- struct pl330_info *pi;
struct dma_device *pd;
struct resource *res;
int i, ret, irq;
@@ -2905,30 +2611,27 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
return ret;
/* Allocate a new DMAC and its Channels */
- pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
- if (!pdmac) {
+ pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
+ if (!pl330) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
}
- pi = &pdmac->pif;
- pi->dev = &adev->dev;
- pi->pl330_data = NULL;
- pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
+ pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
- pi->base = devm_ioremap_resource(&adev->dev, res);
- if (IS_ERR(pi->base))
- return PTR_ERR(pi->base);
+ pl330->base = devm_ioremap_resource(&adev->dev, res);
+ if (IS_ERR(pl330->base))
+ return PTR_ERR(pl330->base);
- amba_set_drvdata(adev, pdmac);
+ amba_set_drvdata(adev, pl330);
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = adev->irq[i];
if (irq) {
ret = devm_request_irq(&adev->dev, irq,
pl330_irq_handler, 0,
- dev_name(&adev->dev), pi);
+ dev_name(&adev->dev), pl330);
if (ret)
return ret;
} else {
@@ -2936,38 +2639,40 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
}
}
- pi->pcfg.periph_id = adev->periphid;
- ret = pl330_add(pi);
+ pcfg = &pl330->pcfg;
+
+ pcfg->periph_id = adev->periphid;
+ ret = pl330_add(pl330);
if (ret)
return ret;
- INIT_LIST_HEAD(&pdmac->desc_pool);
- spin_lock_init(&pdmac->pool_lock);
+ INIT_LIST_HEAD(&pl330->desc_pool);
+ spin_lock_init(&pl330->pool_lock);
/* Create a descriptor pool of default size */
- if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
+ if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
dev_warn(&adev->dev, "unable to allocate desc\n");
- pd = &pdmac->ddma;
+ pd = &pl330->ddma;
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
if (pdat)
- num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
+ num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan);
else
- num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
+ num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);
- pdmac->num_peripherals = num_chan;
+ pl330->num_peripherals = num_chan;
- pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
- if (!pdmac->peripherals) {
+ pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
+ if (!pl330->peripherals) {
ret = -ENOMEM;
- dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
+ dev_err(&adev->dev, "unable to allocate pl330->peripherals\n");
goto probe_err2;
}
for (i = 0; i < num_chan; i++) {
- pch = &pdmac->peripherals[i];
+ pch = &pl330->peripherals[i];
if (!adev->dev.of_node)
pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
else
@@ -2977,9 +2682,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pch->work_list);
INIT_LIST_HEAD(&pch->completed_list);
spin_lock_init(&pch->lock);
- pch->pl330_chid = NULL;
+ pch->thread = NULL;
pch->chan.device = pd;
- pch->dmac = pdmac;
+ pch->dmac = pl330;
/* Add the channel to the DMAC list */
list_add_tail(&pch->chan.device_node, &pd->channels);
@@ -2990,7 +2695,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->cap_mask = pdat->cap_mask;
} else {
dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- if (pi->pcfg.num_peri) {
+ if (pcfg->num_peri) {
dma_cap_set(DMA_SLAVE, pd->cap_mask);
dma_cap_set(DMA_CYCLIC, pd->cap_mask);
dma_cap_set(DMA_PRIVATE, pd->cap_mask);
@@ -3015,14 +2720,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
if (adev->dev.of_node) {
ret = of_dma_controller_register(adev->dev.of_node,
- of_dma_pl330_xlate, pdmac);
+ of_dma_pl330_xlate, pl330);
if (ret) {
dev_err(&adev->dev,
"unable to register DMA to the generic DT DMA helpers\n");
}
}
- adev->dev.dma_parms = &pdmac->dma_parms;
+ adev->dev.dma_parms = &pl330->dma_parms;
/*
* This is the limit for transfers with a buswidth of 1, larger
@@ -3037,14 +2742,13 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
dev_info(&adev->dev,
"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
- pi->pcfg.data_buf_dep,
- pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
- pi->pcfg.num_peri, pi->pcfg.num_events);
+ pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
+ pcfg->num_peri, pcfg->num_events);
return 0;
probe_err3:
/* Idle the DMAC */
- list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+ list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
chan.device_node) {
/* Remove the channel */
@@ -3055,27 +2759,23 @@ probe_err3:
pl330_free_chan_resources(&pch->chan);
}
probe_err2:
- pl330_del(pi);
+ pl330_del(pl330);
return ret;
}
static int pl330_remove(struct amba_device *adev)
{
- struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
+ struct pl330_dmac *pl330 = amba_get_drvdata(adev);
struct dma_pl330_chan *pch, *_p;
- struct pl330_info *pi;
-
- if (!pdmac)
- return 0;
if (adev->dev.of_node)
of_dma_controller_free(adev->dev.of_node);
- dma_async_device_unregister(&pdmac->ddma);
+ dma_async_device_unregister(&pl330->ddma);
/* Idle the DMAC */
- list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
+ list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
chan.device_node) {
/* Remove the channel */
@@ -3086,9 +2786,7 @@ static int pl330_remove(struct amba_device *adev)
pl330_free_chan_resources(&pch->chan);
}
- pi = &pdmac->pif;
-
- pl330_del(pi);
+ pl330_del(pl330);
return 0;
}
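
For context on the reworked DMA_TERMINATE_ALL handling above: a client still reaches it through the generic dmaengine control interface. A minimal, hedged client-side sketch follows; example_stop_channel() is illustrative only and not part of the patch.

#include <linux/dmaengine.h>

/*
 * Hypothetical client-side helper (not part of the patch): terminating a
 * PL330 channel still goes through the generic dmaengine control hook and
 * ends up in the DMA_TERMINATE_ALL case above, where _stop() is issued
 * under pl330->lock and all pending descriptors return to the DMAC pool.
 */
static void example_stop_channel(struct dma_chan *chan)
{
	dmaengine_terminate_all(chan);
}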
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 82c923146e49..7a4bbb0f80a5 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -61,12 +61,17 @@ struct bam_desc_hw {
#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
+#define DESC_FLAG_NWD BIT(12)
struct bam_async_desc {
struct virt_dma_desc vd;
u32 num_desc;
u32 xfer_len;
+
+ /* transaction flags, EOT|EOB|NWD */
+ u16 flags;
+
struct bam_desc_hw *curr_desc;
enum dma_transfer_direction dir;
@@ -490,6 +495,14 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
if (!async_desc)
goto err_out;
+ if (flags & DMA_PREP_FENCE)
+ async_desc->flags |= DESC_FLAG_NWD;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ async_desc->flags |= DESC_FLAG_EOT;
+ else
+ async_desc->flags |= DESC_FLAG_INT;
+
async_desc->num_desc = num_alloc;
async_desc->curr_desc = async_desc->desc;
async_desc->dir = direction;
@@ -793,8 +806,11 @@ static void bam_start_dma(struct bam_chan *bchan)
else
async_desc->xfer_len = async_desc->num_desc;
- /* set INT on last descriptor */
- desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+ /* set any special flags on the last descriptor */
+ if (async_desc->num_desc == async_desc->xfer_len)
+ desc[async_desc->xfer_len - 1].flags = async_desc->flags;
+ else
+ desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
u32 partial = MAX_DESCRIPTORS - bchan->tail;
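
To make the new flag handling in bam_prep_slave_sg() concrete, here is a hedged client sketch; example_prep() and its arguments are assumptions, not part of the patch.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical client sketch: DMA_PREP_INTERRUPT selects EOT on the last
 * hardware descriptor and DMA_PREP_FENCE additionally sets NWD; without
 * DMA_PREP_INTERRUPT the driver falls back to the plain INT flag, matching
 * the hunks above.
 */
static struct dma_async_tx_descriptor *
example_prep(struct dma_chan *chan, struct scatterlist *sgl, unsigned int nents)
{
	return dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
}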
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 012520c9fd79..7416572d1e40 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -889,8 +889,7 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
- enum dma_transfer_direction direction, unsigned long flags,
- void *context)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 5ebdfbc1051e..4b0ef043729a 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -612,7 +612,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
- enum dma_transfer_direction dir, unsigned long flags, void *context)
+ enum dma_transfer_direction dir, unsigned long flags)
{
struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
struct sa11x0_dma_desc *txd;
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 0f719816c91b..0349125a2e20 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -2,21 +2,39 @@
# DMA engine configuration for sh
#
+#
+# DMA Engine Helpers
+#
+
config SH_DMAE_BASE
bool "Renesas SuperH DMA Engine support"
- depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ depends on !SUPERH || SH_DMA
depends on !SH_DMA_API
default y
select DMA_ENGINE
help
Enable support for the Renesas SuperH DMA controllers.
+#
+# DMA Controllers
+#
+
config SH_DMAE
tristate "Renesas SuperH DMAC support"
depends on SH_DMAE_BASE
help
Enable support for the Renesas SuperH DMA controllers.
+if SH_DMAE
+
+config SH_DMAE_R8A73A4
+ def_bool y
+ depends on ARCH_R8A73A4
+ depends on OF
+
+endif
+
config SUDMAC
tristate "Renesas SUDMAC support"
depends on SH_DMAE_BASE
@@ -34,7 +52,3 @@ config RCAR_AUDMAC_PP
depends on SH_DMAE_BASE
help
Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
-
-config SHDMA_R8A73A4
- def_bool y
- depends on ARCH_R8A73A4 && SH_DMAE != n
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
index 1ce88b28cfc6..0a5cfdb76e45 100644
--- a/drivers/dma/sh/Makefile
+++ b/drivers/dma/sh/Makefile
@@ -1,10 +1,18 @@
+#
+# DMA Engine Helpers
+#
+
obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o
-obj-$(CONFIG_SH_DMAE) += shdma.o
+
+#
+# DMA Controllers
+#
+
shdma-y := shdmac.o
-ifeq ($(CONFIG_OF),y)
-shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o
-endif
+shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o
shdma-objs := $(shdma-y)
+obj-$(CONFIG_SH_DMAE) += shdma.o
+
obj-$(CONFIG_SUDMAC) += sudmac.o
obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
index 2de77289a2e9..dabbf0aba2e9 100644
--- a/drivers/dma/sh/rcar-audmapp.c
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
+#include <linux/of_dma.h>
#include <linux/platform_data/dma-rcar-audmapp.h>
#include <linux/platform_device.h>
#include <linux/shdma-base.h>
@@ -45,8 +46,9 @@
struct audmapp_chan {
struct shdma_chan shdma_chan;
- struct audmapp_slave_config *config;
void __iomem *base;
+ dma_addr_t slave_addr;
+ u32 chcr;
};
struct audmapp_device {
@@ -56,7 +58,16 @@ struct audmapp_device {
void __iomem *chan_reg;
};
+struct audmapp_desc {
+ struct shdma_desc shdma_desc;
+ dma_addr_t src;
+ dma_addr_t dst;
+};
+
+#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
+
#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
+#define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc)
#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \
struct audmapp_device, shdma_dev.dma_dev)
@@ -90,70 +101,82 @@ static void audmapp_halt(struct shdma_chan *schan)
}
static void audmapp_start_xfer(struct shdma_chan *schan,
- struct shdma_desc *sdecs)
+ struct shdma_desc *sdesc)
{
struct audmapp_chan *auchan = to_chan(schan);
struct audmapp_device *audev = to_dev(auchan);
- struct audmapp_slave_config *cfg = auchan->config;
+ struct audmapp_desc *desc = to_desc(sdesc);
struct device *dev = audev->dev;
- u32 chcr = cfg->chcr | PDMACHCR_DE;
+ u32 chcr = auchan->chcr | PDMACHCR_DE;
- dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n",
- &cfg->src, &cfg->dst, cfg->chcr);
+ dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n",
+ &desc->src, &desc->dst, chcr);
- audmapp_write(auchan, cfg->src, PDMASAR);
- audmapp_write(auchan, cfg->dst, PDMADAR);
+ audmapp_write(auchan, desc->src, PDMASAR);
+ audmapp_write(auchan, desc->dst, PDMADAR);
audmapp_write(auchan, chcr, PDMACHCR);
}
-static struct audmapp_slave_config *
-audmapp_find_slave(struct audmapp_chan *auchan, int slave_id)
+static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id,
+ u32 *chcr, dma_addr_t *dst)
{
struct audmapp_device *audev = to_dev(auchan);
struct audmapp_pdata *pdata = audev->pdata;
struct audmapp_slave_config *cfg;
int i;
+ *chcr = 0;
+ *dst = 0;
+
+ if (!pdata) { /* DT */
+ *chcr = ((u32)slave_id) << 16;
+ auchan->shdma_chan.slave_id = (slave_id) >> 8;
+ return;
+ }
+
+ /* non-DT */
+
if (slave_id >= AUDMAPP_SLAVE_NUMBER)
- return NULL;
+ return;
for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
- if (cfg->slave_id == slave_id)
- return cfg;
-
- return NULL;
+ if (cfg->slave_id == slave_id) {
+ *chcr = cfg->chcr;
+ *dst = cfg->dst;
+ break;
+ }
}
static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
dma_addr_t slave_addr, bool try)
{
struct audmapp_chan *auchan = to_chan(schan);
- struct audmapp_slave_config *cfg =
- audmapp_find_slave(auchan, slave_id);
+ u32 chcr;
+ dma_addr_t dst;
+
+ audmapp_get_config(auchan, slave_id, &chcr, &dst);
- if (!cfg)
- return -ENODEV;
if (try)
return 0;
- auchan->config = cfg;
+ auchan->chcr = chcr;
+ auchan->slave_addr = slave_addr ? : dst;
return 0;
}
static int audmapp_desc_setup(struct shdma_chan *schan,
- struct shdma_desc *sdecs,
+ struct shdma_desc *sdesc,
dma_addr_t src, dma_addr_t dst, size_t *len)
{
- struct audmapp_chan *auchan = to_chan(schan);
- struct audmapp_slave_config *cfg = auchan->config;
-
- if (!cfg)
- return -ENODEV;
+ struct audmapp_desc *desc = to_desc(sdesc);
if (*len > (size_t)AUDMAPP_LEN_MAX)
*len = (size_t)AUDMAPP_LEN_MAX;
+ desc->src = src;
+ desc->dst = dst;
+
return 0;
}
@@ -164,7 +187,9 @@ static void audmapp_setup_xfer(struct shdma_chan *schan,
static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
{
- return 0; /* always fixed address */
+ struct audmapp_chan *auchan = to_chan(schan);
+
+ return auchan->slave_addr;
}
static bool audmapp_channel_busy(struct shdma_chan *schan)
@@ -183,7 +208,7 @@ static bool audmapp_desc_completed(struct shdma_chan *schan,
static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
{
- return &((struct shdma_desc *)buf)[i];
+ return &((struct audmapp_desc *)buf)[i].shdma_desc;
}
static const struct shdma_ops audmapp_shdma_ops = {
@@ -234,16 +259,39 @@ static void audmapp_chan_remove(struct audmapp_device *audev)
dma_dev->chancnt = 0;
}
+static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ u32 chcr = dma_spec->args[0];
+
+ if (dma_spec->args_count != 1)
+ return NULL;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, shdma_chan_filter, NULL);
+ if (chan)
+ to_shdma_chan(chan)->hw_req = chcr;
+
+ return chan;
+}
+
static int audmapp_probe(struct platform_device *pdev)
{
struct audmapp_pdata *pdata = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
struct audmapp_device *audev;
struct shdma_dev *sdev;
struct dma_device *dma_dev;
struct resource *res;
int err, i;
- if (!pdata)
+ if (np)
+ of_dma_controller_register(np, audmapp_of_xlate, pdev);
+ else if (!pdata)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -260,7 +308,7 @@ static int audmapp_probe(struct platform_device *pdev)
sdev = &audev->shdma_dev;
sdev->ops = &audmapp_shdma_ops;
- sdev->desc_size = sizeof(struct shdma_desc);
+ sdev->desc_size = sizeof(struct audmapp_desc);
dma_dev = &sdev->dma_dev;
dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
@@ -305,12 +353,18 @@ static int audmapp_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id audmapp_of_match[] = {
+ { .compatible = "renesas,rcar-audmapp", },
+ {},
+};
+
static struct platform_driver audmapp_driver = {
.probe = audmapp_probe,
.remove = audmapp_remove,
.driver = {
.owner = THIS_MODULE,
.name = "rcar-audmapp-engine",
+ .of_match_table = audmapp_of_match,
},
};
module_platform_driver(audmapp_driver);
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
index a2b8258426c9..a1b0ef45d6a2 100644
--- a/drivers/dma/sh/shdma-arm.h
+++ b/drivers/dma/sh/shdma-arm.h
@@ -45,7 +45,7 @@ enum {
((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\
(((i) & TS_HI_BIT) << TS_HI_SHIFT))
-#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz)))
-#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz)))
+#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz)))
#endif
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index b35007e21e6b..42d497416196 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -206,45 +206,6 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id,
return 0;
}
-/*
- * This is the standard shdma filter function to be used as a replacement to the
- * "old" method, using the .private pointer. If for some reason you allocate a
- * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
- * parameter. If this filter is used, the slave driver, after calling
- * dma_request_channel(), will also have to call dmaengine_slave_config() with
- * .slave_id, .direction, and either .src_addr or .dst_addr set.
- * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
- * capability! If this becomes a requirement, hardware glue drivers, using this
- * services would have to provide their own filters, which first would check
- * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
- * this, and only then, in case of a match, call this common filter.
- * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
- * In that case the MID-RID value is used for slave channel filtering and is
- * passed to this function in the "arg" parameter.
- */
-bool shdma_chan_filter(struct dma_chan *chan, void *arg)
-{
- struct shdma_chan *schan = to_shdma_chan(chan);
- struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
- const struct shdma_ops *ops = sdev->ops;
- int match = (long)arg;
- int ret;
-
- if (match < 0)
- /* No slave requested - arbitrary channel */
- return true;
-
- if (!schan->dev->of_node && match >= slave_num)
- return false;
-
- ret = ops->set_slave(schan, match, 0, true);
- if (ret < 0)
- return false;
-
- return true;
-}
-EXPORT_SYMBOL(shdma_chan_filter);
-
static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
struct shdma_chan *schan = to_shdma_chan(chan);
@@ -295,6 +256,51 @@ esetslave:
return ret;
}
+/*
+ * This is the standard shdma filter function to be used as a replacement to the
+ * "old" method, using the .private pointer. If for some reason you allocate a
+ * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
+ * parameter. If this filter is used, the slave driver, after calling
+ * dma_request_channel(), will also have to call dmaengine_slave_config() with
+ * .slave_id, .direction, and either .src_addr or .dst_addr set.
+ * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
+ * capability! If this becomes a requirement, hardware glue drivers using these
+ * services would have to provide their own filters, which would first check
+ * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
+ * this, and only then, in case of a match, call this common filter.
+ * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
+ * In that case the MID-RID value is used for slave channel filtering and is
+ * passed to this function in the "arg" parameter.
+ */
+bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct shdma_chan *schan;
+ struct shdma_dev *sdev;
+ int match = (long)arg;
+ int ret;
+
+ /* Only support channels handled by this driver. */
+ if (chan->device->device_alloc_chan_resources !=
+ shdma_alloc_chan_resources)
+ return false;
+
+ if (match < 0)
+ /* No slave requested - arbitrary channel */
+ return true;
+
+ schan = to_shdma_chan(chan);
+ if (!schan->dev->of_node && match >= slave_num)
+ return false;
+
+ sdev = to_shdma_dev(schan->dma_chan.device);
+ ret = sdev->ops->set_slave(schan, match, 0, true);
+ if (ret < 0)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(shdma_chan_filter);
+
static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
struct shdma_desc *desc, *_desc;
@@ -662,15 +668,16 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct shdma_chan *schan = to_shdma_chan(chan);
struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ struct dma_async_tx_descriptor *desc;
const struct shdma_ops *ops = sdev->ops;
unsigned int sg_len = buf_len / period_len;
int slave_id = schan->slave_id;
dma_addr_t slave_addr;
- struct scatterlist sgl[SHDMA_MAX_SG_LEN];
+ struct scatterlist *sgl;
int i;
if (!chan)
@@ -694,7 +701,16 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
slave_addr = ops->slave_addr(schan);
+ /*
+ * Allocate the sg list dynamically as it would consume too much stack
+ * space.
+ */
+ sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);
+ if (!sgl)
+ return NULL;
+
sg_init_table(sgl, sg_len);
+
for (i = 0; i < sg_len; i++) {
dma_addr_t src = buf_addr + (period_len * i);
@@ -704,8 +720,11 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
sg_dma_len(&sgl[i]) = period_len;
}
- return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+ desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
direction, flags, true);
+
+ kfree(sgl);
+ return desc;
}
static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
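
To make the relocated filter's usage concrete, here is a hedged slave-driver sketch following the comment above; example_request() and its parameters are assumptions, not part of the patch.

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

/*
 * Hedged slave-driver sketch: request a channel via the relocated
 * shdma_chan_filter(), which now also rejects channels not handled by
 * shdma-base, then configure it as the comment above prescribes.
 */
static struct dma_chan *example_request(int slave_id, dma_addr_t dst_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = dst_addr,
		.slave_id = slave_id,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter receives the slave/MID-RID value through 'arg' */
	chan = dma_request_channel(mask, shdma_chan_filter,
				   (void *)(long)slave_id);
	if (!chan)
		return NULL;

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}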
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 758a57b51875..2c0a969adc9f 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -62,7 +62,7 @@ struct sh_dmae_desc {
#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
struct sh_dmae_device, shdma_dev.dma_dev)
-#ifdef CONFIG_SHDMA_R8A73A4
+#ifdef CONFIG_SH_DMAE_R8A73A4
extern const struct sh_dmae_pdata r8a73a4_dma_pdata;
#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata)
#else
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 146d5df926db..58eb85770eba 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -38,12 +38,12 @@
#include "../dmaengine.h"
#include "shdma.h"
-/* DMA register */
-#define SAR 0x00
-#define DAR 0x04
-#define TCR 0x08
-#define CHCR 0x0C
-#define DMAOR 0x40
+/* DMA registers */
+#define SAR 0x00 /* Source Address Register */
+#define DAR 0x04 /* Destination Address Register */
+#define TCR 0x08 /* Transfer Count Register */
+#define CHCR 0x0C /* Channel Control Register */
+#define DMAOR 0x40 /* DMA Operation Register */
#define TEND 0x18 /* USB-DMAC */
@@ -239,9 +239,8 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
{
/*
* Default configuration for dual address memory-memory transfer.
- * 0x400 represents auto-request.
*/
- u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
+ u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
LOG2_DEFAULT_XFER_SIZE);
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
chcr_write(sh_chan, chcr);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 03f7820fa333..aac03ab10c54 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -580,7 +580,7 @@ err_dir:
static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction, unsigned long flags, void *context)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
struct sirfsoc_dma_desc *sdesc = NULL;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c7984459ede7..5fe59335e247 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2531,8 +2531,7 @@ d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_transfer_direction direction, unsigned long flags,
- void *context)
+ enum dma_transfer_direction direction, unsigned long flags)
{
unsigned int periods = buf_len / period_len;
struct dma_async_tx_descriptor *txd;
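
Several drivers in this section (pl330, s3c24xx, sa11x0, shdma-base, sirf, ste_dma40) drop the unused void *context argument from their cyclic prep callbacks. A minimal stub showing the resulting prototype; example_prep_dma_cyclic() is hypothetical.

#include <linux/dmaengine.h>

/*
 * Minimal stub illustrating the updated prototype: the trailing
 * 'void *context' parameter is gone, and the callback is otherwise still
 * wired up through the device_prep_dma_cyclic member of struct dma_device.
 */
static struct dma_async_tx_descriptor *
example_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			size_t buf_len, size_t period_len,
			enum dma_transfer_direction dir, unsigned long flags)
{
	return NULL;	/* a real driver builds and returns a descriptor here */
}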
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
new file mode 100644
index 000000000000..1f92a56fd2b6
--- /dev/null
+++ b/drivers/dma/sun6i-dma.c
@@ -0,0 +1,1053 @@
+/*
+ * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd
+ * Author: Sugar <shuge@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "virt-dma.h"
+
+/*
+ * There are 16 physical channels that can work in parallel.
+ *
+ * However we have 30 different endpoints for our requests.
+ *
+ * Since each channel can only handle a unidirectional
+ * transfer, we need to allocate more virtual channels so that
+ * everyone can grab one channel.
+ *
+ * Some devices can't work in both directions (mostly because it
+ * wouldn't make sense), so we have slightly fewer virtual channels
+ * than 2 channels per endpoint.
+ */
+
+#define NR_MAX_CHANNELS 16
+#define NR_MAX_REQUESTS 30
+#define NR_MAX_VCHANS 53
+
+/*
+ * Common registers
+ */
+#define DMA_IRQ_EN(x) ((x) * 0x04)
+#define DMA_IRQ_HALF BIT(0)
+#define DMA_IRQ_PKG BIT(1)
+#define DMA_IRQ_QUEUE BIT(2)
+
+#define DMA_IRQ_CHAN_NR 8
+#define DMA_IRQ_CHAN_WIDTH 4
+
+
+#define DMA_IRQ_STAT(x) ((x) * 0x04 + 0x10)
+
+#define DMA_STAT 0x30
+
+/*
+ * Channel-specific registers
+ */
+#define DMA_CHAN_ENABLE 0x00
+#define DMA_CHAN_ENABLE_START BIT(0)
+#define DMA_CHAN_ENABLE_STOP 0
+
+#define DMA_CHAN_PAUSE 0x04
+#define DMA_CHAN_PAUSE_PAUSE BIT(1)
+#define DMA_CHAN_PAUSE_RESUME 0
+
+#define DMA_CHAN_LLI_ADDR 0x08
+
+#define DMA_CHAN_CUR_CFG 0x0c
+#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f)
+#define DMA_CHAN_CFG_SRC_IO_MODE BIT(5)
+#define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5)
+#define DMA_CHAN_CFG_SRC_BURST(x) (((x) & 0x3) << 7)
+#define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9)
+
+#define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16)
+#define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16)
+#define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
+#define DMA_CHAN_CFG_DST_BURST(x) (DMA_CHAN_CFG_SRC_BURST(x) << 16)
+#define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16)
+
+#define DMA_CHAN_CUR_SRC 0x10
+
+#define DMA_CHAN_CUR_DST 0x14
+
+#define DMA_CHAN_CUR_CNT 0x18
+
+#define DMA_CHAN_CUR_PARA 0x1c
+
+
+/*
+ * Various hardware related defines
+ */
+#define LLI_LAST_ITEM 0xfffff800
+#define NORMAL_WAIT 8
+#define DRQ_SDRAM 1
+
+/*
+ * Hardware representation of the LLI
+ *
+ * The hardware will be fed the physical address of this structure,
+ * and read its content in order to start the transfer.
+ */
+struct sun6i_dma_lli {
+ u32 cfg;
+ u32 src;
+ u32 dst;
+ u32 len;
+ u32 para;
+ u32 p_lli_next;
+
+ /*
+ * This field is not used by the DMA controller, but will be
+ * used by the CPU to go through the list (mostly for dumping
+ * or freeing it).
+ */
+ struct sun6i_dma_lli *v_lli_next;
+};
+
+
+struct sun6i_desc {
+ struct virt_dma_desc vd;
+ dma_addr_t p_lli;
+ struct sun6i_dma_lli *v_lli;
+};
+
+struct sun6i_pchan {
+ u32 idx;
+ void __iomem *base;
+ struct sun6i_vchan *vchan;
+ struct sun6i_desc *desc;
+ struct sun6i_desc *done;
+};
+
+struct sun6i_vchan {
+ struct virt_dma_chan vc;
+ struct list_head node;
+ struct dma_slave_config cfg;
+ struct sun6i_pchan *phy;
+ u8 port;
+};
+
+struct sun6i_dma_dev {
+ struct dma_device slave;
+ void __iomem *base;
+ struct clk *clk;
+ int irq;
+ spinlock_t lock;
+ struct reset_control *rstc;
+ struct tasklet_struct task;
+ atomic_t tasklet_shutdown;
+ struct list_head pending;
+ struct dma_pool *pool;
+ struct sun6i_pchan *pchans;
+ struct sun6i_vchan *vchans;
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+ return &chan->dev->device;
+}
+
+static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d)
+{
+ return container_of(d, struct sun6i_dma_dev, slave);
+}
+
+static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan)
+{
+ return container_of(chan, struct sun6i_vchan, vc.chan);
+}
+
+static inline struct sun6i_desc *
+to_sun6i_desc(struct dma_async_tx_descriptor *tx)
+{
+ return container_of(tx, struct sun6i_desc, vd.tx);
+}
+
+static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
+{
+ dev_dbg(sdev->slave.dev, "Common register:\n"
+ "\tmask0(%04x): 0x%08x\n"
+ "\tmask1(%04x): 0x%08x\n"
+ "\tpend0(%04x): 0x%08x\n"
+ "\tpend1(%04x): 0x%08x\n"
+ "\tstats(%04x): 0x%08x\n",
+ DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
+ DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
+ DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)),
+ DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)),
+ DMA_STAT, readl(sdev->base + DMA_STAT));
+}
+
+static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
+ struct sun6i_pchan *pchan)
+{
+ phys_addr_t reg = virt_to_phys(pchan->base);
+
+ dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
+ "\t___en(%04x): \t0x%08x\n"
+ "\tpause(%04x): \t0x%08x\n"
+ "\tstart(%04x): \t0x%08x\n"
+ "\t__cfg(%04x): \t0x%08x\n"
+ "\t__src(%04x): \t0x%08x\n"
+ "\t__dst(%04x): \t0x%08x\n"
+ "\tcount(%04x): \t0x%08x\n"
+ "\t_para(%04x): \t0x%08x\n\n",
+ pchan->idx, &reg,
+ DMA_CHAN_ENABLE,
+ readl(pchan->base + DMA_CHAN_ENABLE),
+ DMA_CHAN_PAUSE,
+ readl(pchan->base + DMA_CHAN_PAUSE),
+ DMA_CHAN_LLI_ADDR,
+ readl(pchan->base + DMA_CHAN_LLI_ADDR),
+ DMA_CHAN_CUR_CFG,
+ readl(pchan->base + DMA_CHAN_CUR_CFG),
+ DMA_CHAN_CUR_SRC,
+ readl(pchan->base + DMA_CHAN_CUR_SRC),
+ DMA_CHAN_CUR_DST,
+ readl(pchan->base + DMA_CHAN_CUR_DST),
+ DMA_CHAN_CUR_CNT,
+ readl(pchan->base + DMA_CHAN_CUR_CNT),
+ DMA_CHAN_CUR_PARA,
+ readl(pchan->base + DMA_CHAN_CUR_PARA));
+}
+
+static inline int convert_burst(u32 maxburst, u8 *burst)
+{
+ switch (maxburst) {
+ case 1:
+ *burst = 0;
+ break;
+ case 8:
+ *burst = 2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width)
+{
+ if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) ||
+ (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
+ return -EINVAL;
+
+ *width = addr_width >> 1;
+ return 0;
+}
+
+static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
+ struct sun6i_dma_lli *next,
+ dma_addr_t next_phy,
+ struct sun6i_desc *txd)
+{
+ if ((!prev && !txd) || !next)
+ return NULL;
+
+ if (!prev) {
+ txd->p_lli = next_phy;
+ txd->v_lli = next;
+ } else {
+ prev->p_lli_next = next_phy;
+ prev->v_lli_next = next;
+ }
+
+ next->p_lli_next = LLI_LAST_ITEM;
+ next->v_lli_next = NULL;
+
+ return next;
+}
+
+static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
+ dma_addr_t src,
+ dma_addr_t dst, u32 len,
+ struct dma_slave_config *config)
+{
+ u8 src_width, dst_width, src_burst, dst_burst;
+ int ret;
+
+ if (!config)
+ return -EINVAL;
+
+ ret = convert_burst(config->src_maxburst, &src_burst);
+ if (ret)
+ return ret;
+
+ ret = convert_burst(config->dst_maxburst, &dst_burst);
+ if (ret)
+ return ret;
+
+ ret = convert_buswidth(config->src_addr_width, &src_width);
+ if (ret)
+ return ret;
+
+ ret = convert_buswidth(config->dst_addr_width, &dst_width);
+ if (ret)
+ return ret;
+
+ lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
+ DMA_CHAN_CFG_SRC_WIDTH(src_width) |
+ DMA_CHAN_CFG_DST_BURST(dst_burst) |
+ DMA_CHAN_CFG_DST_WIDTH(dst_width);
+
+ lli->src = src;
+ lli->dst = dst;
+ lli->len = len;
+ lli->para = NORMAL_WAIT;
+
+ return 0;
+}
+
+static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
+ struct sun6i_dma_lli *lli)
+{
+ phys_addr_t p_lli = virt_to_phys(lli);
+
+ dev_dbg(chan2dev(&vchan->vc.chan),
+ "\n\tdesc: p - %pa v - 0x%p\n"
+ "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
+ "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
+ &p_lli, lli,
+ lli->cfg, lli->src, lli->dst,
+ lli->len, lli->para, lli->p_lli_next);
+}
+
+static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct sun6i_desc *txd = to_sun6i_desc(&vd->tx);
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device);
+ struct sun6i_dma_lli *v_lli, *v_next;
+ dma_addr_t p_lli, p_next;
+
+ if (unlikely(!txd))
+ return;
+
+ p_lli = txd->p_lli;
+ v_lli = txd->v_lli;
+
+ while (v_lli) {
+ v_next = v_lli->v_lli_next;
+ p_next = v_lli->p_lli_next;
+
+ dma_pool_free(sdev->pool, v_lli, p_lli);
+
+ v_lli = v_next;
+ p_lli = p_next;
+ }
+
+ kfree(txd);
+}
+
+static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
+ struct sun6i_pchan *pchan = vchan->phy;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock(&sdev->lock);
+ list_del_init(&vchan->node);
+ spin_unlock(&sdev->lock);
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ vchan_get_all_descriptors(&vchan->vc, &head);
+
+ if (pchan) {
+ writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
+ writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);
+
+ vchan->phy = NULL;
+ pchan->vchan = NULL;
+ pchan->desc = NULL;
+ pchan->done = NULL;
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&vchan->vc, &head);
+
+ return 0;
+}
+
+static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
+ struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);
+ struct sun6i_pchan *pchan = vchan->phy;
+ u32 irq_val, irq_reg, irq_offset;
+
+ if (!pchan)
+ return -EAGAIN;
+
+ if (!desc) {
+ pchan->desc = NULL;
+ pchan->done = NULL;
+ return -EAGAIN;
+ }
+
+ list_del(&desc->node);
+
+ pchan->desc = to_sun6i_desc(&desc->tx);
+ pchan->done = NULL;
+
+ sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);
+
+ irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
+ irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;
+
+ irq_val = readl(sdev->base + DMA_IRQ_EN(irq_offset));
+ irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH);
+ writel(irq_val, sdev->base + DMA_IRQ_EN(irq_offset));
+
+ writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
+ writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);
+
+ sun6i_dma_dump_com_regs(sdev);
+ sun6i_dma_dump_chan_regs(sdev, pchan);
+
+ return 0;
+}
+
+static void sun6i_dma_tasklet(unsigned long data)
+{
+ struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
+ struct sun6i_vchan *vchan;
+ struct sun6i_pchan *pchan;
+ unsigned int pchan_alloc = 0;
+ unsigned int pchan_idx;
+
+ list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) {
+ spin_lock_irq(&vchan->vc.lock);
+
+ pchan = vchan->phy;
+
+ if (pchan && pchan->done) {
+ if (sun6i_dma_start_desc(vchan)) {
+ /*
+ * No current txd associated with this channel
+ */
+ dev_dbg(sdev->slave.dev, "pchan %u: free\n",
+ pchan->idx);
+
+ /* Mark this channel free */
+ vchan->phy = NULL;
+ pchan->vchan = NULL;
+ }
+ }
+ spin_unlock_irq(&vchan->vc.lock);
+ }
+
+ spin_lock_irq(&sdev->lock);
+ for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+ pchan = &sdev->pchans[pchan_idx];
+
+ if (pchan->vchan || list_empty(&sdev->pending))
+ continue;
+
+ vchan = list_first_entry(&sdev->pending,
+ struct sun6i_vchan, node);
+
+ /* Remove from pending channels */
+ list_del_init(&vchan->node);
+ pchan_alloc |= BIT(pchan_idx);
+
+ /* Mark this channel allocated */
+ pchan->vchan = vchan;
+ vchan->phy = pchan;
+ dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n",
+ pchan->idx, &vchan->vc);
+ }
+ spin_unlock_irq(&sdev->lock);
+
+ for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) {
+ if (!(pchan_alloc & BIT(pchan_idx)))
+ continue;
+
+ pchan = sdev->pchans + pchan_idx;
+ vchan = pchan->vchan;
+ if (vchan) {
+ spin_lock_irq(&vchan->vc.lock);
+ sun6i_dma_start_desc(vchan);
+ spin_unlock_irq(&vchan->vc.lock);
+ }
+ }
+}
+
+static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
+{
+ struct sun6i_dma_dev *sdev = dev_id;
+ struct sun6i_vchan *vchan;
+ struct sun6i_pchan *pchan;
+ int i, j, ret = IRQ_NONE;
+ u32 status;
+
+ for (i = 0; i < 2; i++) {
+ status = readl(sdev->base + DMA_IRQ_STAT(i));
+ if (!status)
+ continue;
+
+ dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
+ i ? "high" : "low", status);
+
+ writel(status, sdev->base + DMA_IRQ_STAT(i));
+
+ for (j = 0; (j < 8) && status; j++) {
+ if (status & DMA_IRQ_QUEUE) {
+ pchan = sdev->pchans + j;
+ vchan = pchan->vchan;
+
+ if (vchan) {
+ spin_lock(&vchan->vc.lock);
+ vchan_cookie_complete(&pchan->desc->vd);
+ pchan->done = pchan->desc;
+ spin_unlock(&vchan->vc.lock);
+ }
+ }
+
+ status = status >> 4;
+ }
+
+ if (!atomic_read(&sdev->tasklet_shutdown))
+ tasklet_schedule(&sdev->task);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun6i_dma_lli *v_lli;
+ struct sun6i_desc *txd;
+ dma_addr_t p_lli;
+ int ret;
+
+ dev_dbg(chan2dev(chan),
+ "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
+ __func__, vchan->vc.chan.chan_id, &dest, &src, len, flags);
+
+ if (!len)
+ return NULL;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ if (!v_lli) {
+ dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
+ goto err_txd_free;
+ }
+
+ ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig);
+ if (ret)
+ goto err_dma_free;
+
+ v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_DST_LINEAR_MODE |
+ DMA_CHAN_CFG_SRC_LINEAR_MODE;
+
+ sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
+
+ sun6i_dma_dump_lli(vchan, v_lli);
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_dma_free:
+ dma_pool_free(sdev->pool, v_lli, p_lli);
+err_txd_free:
+ kfree(txd);
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct sun6i_dma_lli *v_lli, *prev = NULL;
+ struct sun6i_desc *txd;
+ struct scatterlist *sg;
+ dma_addr_t p_lli;
+ int i, ret;
+
+ if (!sgl)
+ return NULL;
+
+ if (!is_slave_direction(dir)) {
+ dev_err(chan2dev(chan), "Invalid DMA direction\n");
+ return NULL;
+ }
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
+ if (!v_lli)
+ goto err_lli_free;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg),
+ sconfig->dst_addr, sg_dma_len(sg),
+ sconfig);
+ if (ret)
+ goto err_cur_lli_free;
+
+ v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE |
+ DMA_CHAN_CFG_SRC_LINEAR_MODE |
+ DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_DST_DRQ(vchan->port);
+
+ dev_dbg(chan2dev(chan),
+ "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
+ __func__, vchan->vc.chan.chan_id,
+ &sconfig->dst_addr, &sg_dma_address(sg),
+ sg_dma_len(sg), flags);
+
+ } else {
+ ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr,
+ sg_dma_address(sg), sg_dma_len(sg),
+ sconfig);
+ if (ret)
+ goto err_cur_lli_free;
+
+ v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE |
+ DMA_CHAN_CFG_SRC_IO_MODE |
+ DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
+ DMA_CHAN_CFG_SRC_DRQ(vchan->port);
+
+ dev_dbg(chan2dev(chan),
+ "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
+ __func__, vchan->vc.chan.chan_id,
+ &sg_dma_address(sg), &sconfig->src_addr,
+ sg_dma_len(sg), flags);
+ }
+
+ prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
+ }
+
+ dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
+ for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
+ sun6i_dma_dump_lli(vchan, prev);
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_cur_lli_free:
+ dma_pool_free(sdev->pool, v_lli, p_lli);
+err_lli_free:
+ for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
+ dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
+ kfree(txd);
+ return NULL;
+}
+
+static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ struct sun6i_pchan *pchan = vchan->phy;
+ unsigned long flags;
+ int ret = 0;
+
+ switch (cmd) {
+ case DMA_RESUME:
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ if (pchan) {
+ writel(DMA_CHAN_PAUSE_RESUME,
+ pchan->base + DMA_CHAN_PAUSE);
+ } else if (!list_empty(&vchan->vc.desc_issued)) {
+ spin_lock(&sdev->lock);
+ list_add_tail(&vchan->node, &sdev->pending);
+ spin_unlock(&sdev->lock);
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+ break;
+
+ case DMA_PAUSE:
+ dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
+
+ if (pchan) {
+ writel(DMA_CHAN_PAUSE_PAUSE,
+ pchan->base + DMA_CHAN_PAUSE);
+ } else {
+ spin_lock(&sdev->lock);
+ list_del_init(&vchan->node);
+ spin_unlock(&sdev->lock);
+ }
+ break;
+
+ case DMA_TERMINATE_ALL:
+ ret = sun6i_dma_terminate_all(vchan);
+ break;
+ case DMA_SLAVE_CONFIG:
+ memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config));
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
+static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ struct sun6i_pchan *pchan = vchan->phy;
+ struct sun6i_dma_lli *lli;
+ struct virt_dma_desc *vd;
+ struct sun6i_desc *txd;
+ enum dma_status ret;
+ unsigned long flags;
+ size_t bytes = 0;
+
+ ret = dma_cookie_status(chan, cookie, state);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ vd = vchan_find_desc(&vchan->vc, cookie);
+ txd = to_sun6i_desc(&vd->tx);
+
+ if (vd) {
+ for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next)
+ bytes += lli->len;
+ } else if (!pchan || !pchan->desc) {
+ bytes = 0;
+ } else {
+ bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ dma_set_residue(state, bytes);
+
+ return ret;
+}
+
+static void sun6i_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ if (vchan_issue_pending(&vchan->vc)) {
+ spin_lock(&sdev->lock);
+
+ if (!vchan->phy && list_empty(&vchan->node)) {
+ list_add_tail(&vchan->node, &sdev->pending);
+ tasklet_schedule(&sdev->task);
+ dev_dbg(chan2dev(chan), "vchan %p: issued\n",
+ &vchan->vc);
+ }
+
+ spin_unlock(&sdev->lock);
+ } else {
+ dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n",
+ &vchan->vc);
+ }
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+}
+
+static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
+ struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&sdev->lock, flags);
+ list_del_init(&vchan->node);
+ spin_unlock_irqrestore(&sdev->lock, flags);
+
+ vchan_free_chan_resources(&vchan->vc);
+}
+
+static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct sun6i_dma_dev *sdev = ofdma->of_dma_data;
+ struct sun6i_vchan *vchan;
+ struct dma_chan *chan;
+ u8 port = dma_spec->args[0];
+
+ if (port > NR_MAX_REQUESTS)
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&sdev->slave);
+ if (!chan)
+ return NULL;
+
+ vchan = to_sun6i_vchan(chan);
+ vchan->port = port;
+
+ return chan;
+}
+
+static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev)
+{
+ /* Disable all interrupts from DMA */
+ writel(0, sdev->base + DMA_IRQ_EN(0));
+ writel(0, sdev->base + DMA_IRQ_EN(1));
+
+ /* Prevent spurious interrupts from scheduling the tasklet */
+ atomic_inc(&sdev->tasklet_shutdown);
+
+ /* Make sure we won't have any further interrupts */
+ devm_free_irq(sdev->slave.dev, sdev->irq, sdev);
+
+ /* Actually prevent the tasklet from being scheduled */
+ tasklet_kill(&sdev->task);
+}
+
+static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
+{
+ int i;
+
+ for (i = 0; i < NR_MAX_VCHANS; i++) {
+ struct sun6i_vchan *vchan = &sdev->vchans[i];
+
+ list_del(&vchan->vc.chan.device_node);
+ tasklet_kill(&vchan->vc.task);
+ }
+}
+
+static int sun6i_dma_probe(struct platform_device *pdev)
+{
+ struct sun6i_dma_dev *sdc;
+ struct resource *res;
+ struct clk *mux, *pll6;
+ int ret, i;
+
+ sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
+ if (!sdc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sdc->base))
+ return PTR_ERR(sdc->base);
+
+ sdc->irq = platform_get_irq(pdev, 0);
+ if (sdc->irq < 0) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ return sdc->irq;
+ }
+
+ sdc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sdc->clk)) {
+ dev_err(&pdev->dev, "No clock specified\n");
+ return PTR_ERR(sdc->clk);
+ }
+
+ mux = clk_get(NULL, "ahb1_mux");
+ if (IS_ERR(mux)) {
+ dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n");
+ return PTR_ERR(mux);
+ }
+
+ pll6 = clk_get(NULL, "pll6");
+ if (IS_ERR(pll6)) {
+ dev_err(&pdev->dev, "Couldn't get PLL6\n");
+ clk_put(mux);
+ return PTR_ERR(pll6);
+ }
+
+ ret = clk_set_parent(mux, pll6);
+ clk_put(pll6);
+ clk_put(mux);
+
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n");
+ return ret;
+ }
+
+ sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(sdc->rstc)) {
+ dev_err(&pdev->dev, "No reset controller specified\n");
+ return PTR_ERR(sdc->rstc);
+ }
+
+ sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
+ sizeof(struct sun6i_dma_lli), 4, 0);
+ if (!sdc->pool) {
+ dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, sdc);
+ INIT_LIST_HEAD(&sdc->pending);
+ spin_lock_init(&sdc->lock);
+
+ dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
+ dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
+ dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
+
+ INIT_LIST_HEAD(&sdc->slave.channels);
+ sdc->slave.device_alloc_chan_resources = sun6i_dma_alloc_chan_resources;
+ sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources;
+ sdc->slave.device_tx_status = sun6i_dma_tx_status;
+ sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
+ sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
+ sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
+ sdc->slave.device_control = sun6i_dma_control;
+ sdc->slave.chancnt = NR_MAX_VCHANS;
+
+ sdc->slave.dev = &pdev->dev;
+
+ sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS,
+ sizeof(struct sun6i_pchan), GFP_KERNEL);
+ if (!sdc->pchans)
+ return -ENOMEM;
+
+ sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS,
+ sizeof(struct sun6i_vchan), GFP_KERNEL);
+ if (!sdc->vchans)
+ return -ENOMEM;
+
+ tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
+
+ for (i = 0; i < NR_MAX_CHANNELS; i++) {
+ struct sun6i_pchan *pchan = &sdc->pchans[i];
+
+ pchan->idx = i;
+ pchan->base = sdc->base + 0x100 + i * 0x40;
+ }
+
+ for (i = 0; i < NR_MAX_VCHANS; i++) {
+ struct sun6i_vchan *vchan = &sdc->vchans[i];
+
+ INIT_LIST_HEAD(&vchan->node);
+ vchan->vc.desc_free = sun6i_dma_free_desc;
+ vchan_init(&vchan->vc, &sdc->slave);
+ }
+
+ ret = reset_control_deassert(sdc->rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't deassert the device from reset\n");
+ goto err_chan_free;
+ }
+
+ ret = clk_prepare_enable(sdc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't enable the clock\n");
+ goto err_reset_assert;
+ }
+
+ ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
+ dev_name(&pdev->dev), sdc);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_clk_disable;
+ }
+
+ ret = dma_async_device_register(&sdc->slave);
+ if (ret) {
+ dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
+ goto err_irq_disable;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate,
+ sdc);
+ if (ret) {
+ dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+ goto err_dma_unregister;
+ }
+
+ return 0;
+
+err_dma_unregister:
+ dma_async_device_unregister(&sdc->slave);
+err_irq_disable:
+ sun6i_kill_tasklet(sdc);
+err_clk_disable:
+ clk_disable_unprepare(sdc->clk);
+err_reset_assert:
+ reset_control_assert(sdc->rstc);
+err_chan_free:
+ sun6i_dma_free(sdc);
+ return ret;
+}
+
+static int sun6i_dma_remove(struct platform_device *pdev)
+{
+ struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&sdc->slave);
+
+ sun6i_kill_tasklet(sdc);
+
+ clk_disable_unprepare(sdc->clk);
+ reset_control_assert(sdc->rstc);
+
+ sun6i_dma_free(sdc);
+
+ return 0;
+}
+
+static struct of_device_id sun6i_dma_match[] = {
+ { .compatible = "allwinner,sun6i-a31-dma" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver sun6i_dma_driver = {
+ .probe = sun6i_dma_probe,
+ .remove = sun6i_dma_remove,
+ .driver = {
+ .name = "sun6i-dma",
+ .of_match_table = sun6i_dma_match,
+ },
+};
+module_platform_driver(sun6i_dma_driver);
+
+MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver");
+MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 03ad64ecaaf0..16efa603ff65 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1055,7 +1055,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
+ unsigned long flags)
{
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma_desc *dma_desc = NULL;
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 437e6fd47311..5b53d6183b6b 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -344,7 +344,7 @@ static const char * const pcie_port_type_strs[] = {
};
static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
- const struct acpi_generic_data *gdata)
+ const struct acpi_hest_generic_data *gdata)
{
if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE)
printk("%s""port_type: %d, %s\n", pfx, pcie->port_type,
@@ -380,7 +380,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
}
static void cper_estatus_print_section(
- const char *pfx, const struct acpi_generic_data *gdata, int sec_no)
+ const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no)
{
uuid_le *sec_type = (uuid_le *)gdata->section_type;
__u16 severity;
@@ -426,9 +426,9 @@ err_section_too_small:
}
void cper_estatus_print(const char *pfx,
- const struct acpi_generic_status *estatus)
+ const struct acpi_hest_generic_status *estatus)
{
- struct acpi_generic_data *gdata;
+ struct acpi_hest_generic_data *gdata;
unsigned int data_len, gedata_len;
int sec_no = 0;
char newpfx[64];
@@ -441,7 +441,7 @@ void cper_estatus_print(const char *pfx,
"and requires no further action");
printk("%s""event severity: %s\n", pfx, cper_severity_str(severity));
data_len = estatus->data_length;
- gdata = (struct acpi_generic_data *)(estatus + 1);
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
snprintf(newpfx, sizeof(newpfx), "%s%s", pfx, INDENT_SP);
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
@@ -453,10 +453,10 @@ void cper_estatus_print(const char *pfx,
}
EXPORT_SYMBOL_GPL(cper_estatus_print);
-int cper_estatus_check_header(const struct acpi_generic_status *estatus)
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus)
{
if (estatus->data_length &&
- estatus->data_length < sizeof(struct acpi_generic_data))
+ estatus->data_length < sizeof(struct acpi_hest_generic_data))
return -EINVAL;
if (estatus->raw_data_length &&
estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
@@ -466,9 +466,9 @@ int cper_estatus_check_header(const struct acpi_generic_status *estatus)
}
EXPORT_SYMBOL_GPL(cper_estatus_check_header);
-int cper_estatus_check(const struct acpi_generic_status *estatus)
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
{
- struct acpi_generic_data *gdata;
+ struct acpi_hest_generic_data *gdata;
unsigned int data_len, gedata_len;
int rc;
@@ -476,7 +476,7 @@ int cper_estatus_check(const struct acpi_generic_status *estatus)
if (rc)
return rc;
data_len = estatus->data_length;
- gdata = (struct acpi_generic_data *)(estatus + 1);
+ gdata = (struct acpi_hest_generic_data *)(estatus + 1);
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
if (gedata_len > data_len - sizeof(*gdata))
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 97cdd16a2169..018c29a26615 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -138,6 +138,27 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
return entry;
}
+int efi_get_runtime_map_size(void)
+{
+ return nr_efi_runtime_map * efi_memdesc_size;
+}
+
+int efi_get_runtime_map_desc_size(void)
+{
+ return efi_memdesc_size;
+}
+
+int efi_runtime_map_copy(void *buf, size_t bufsz)
+{
+ size_t sz = efi_get_runtime_map_size();
+
+ if (sz > bufsz)
+ sz = bufsz;
+
+ memcpy(buf, efi_runtime_map, sz);
+ return 0;
+}
+
void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size)
{
efi_runtime_map = map;
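
The three helpers added above let other kernel code (the x86 kexec file-load path appears to be the intended consumer) snapshot the saved runtime map without reaching into the static variables. A minimal sketch of the expected calling pattern, assuming the declarations are exposed via <linux/efi.h>:

	#include <linux/efi.h>
	#include <linux/slab.h>

	static void *example_snapshot_runtime_map(int *nr_entries)
	{
		int sz = efi_get_runtime_map_size();
		int desc_size = efi_get_runtime_map_desc_size();
		void *buf;

		if (!sz || !desc_size)
			return NULL;

		buf = kzalloc(sz, GFP_KERNEL);
		if (!buf)
			return NULL;

		efi_runtime_map_copy(buf, sz);
		*nr_entries = sz / desc_size;	/* one descriptor record per map entry */

		return buf;
	}
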
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 17cf96c45f2b..79f18e6d9c4f 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -286,7 +286,11 @@ int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = firmware_map_find_entry_bootmem(start, end, type);
+ entry = firmware_map_find_entry(start, end - 1, type);
+ if (entry)
+ return 0;
+
+ entry = firmware_map_find_entry_bootmem(start, end - 1, type);
if (!entry) {
entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
if (!entry)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 4a1b5113e527..9de1515e5808 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -340,6 +340,13 @@ config GPIO_XILINX
help
Say yes here to support the Xilinx FPGA GPIO device
+config GPIO_ZYNQ
+ tristate "Xilinx Zynq GPIO support"
+ depends on ARCH_ZYNQ
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support the Xilinx Zynq GPIO controller.
+
config GPIO_XTENSA
bool "Xtensa GPIO32 support"
depends on XTENSA
@@ -423,7 +430,7 @@ config GPIO_GE_FPGA
config GPIO_LYNXPOINT
tristate "Intel Lynxpoint GPIO support"
depends on ACPI && X86
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
driver for GPIO functionality on Intel Lynxpoint PCH chipset
Requires ACPI device enumeration code to set up a platform device.
@@ -450,6 +457,19 @@ config GPIO_ARIZONA
help
Support for GPIOs on Wolfson Arizona class devices.
+config GPIO_CRYSTAL_COVE
+ tristate "GPIO support for Crystal Cove PMIC"
+ depends on INTEL_SOC_PMIC
+ select GPIOLIB_IRQCHIP
+ help
+ Support for GPIO pins on Crystal Cove PMIC.
+
+ Say yes if you have an Intel SoC-based tablet with the Crystal Cove PMIC
+ inside.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-crystalcove.
+
config GPIO_LP3943
tristate "TI/National Semiconductor LP3943 GPIO expander"
depends on MFD_LP3943
@@ -573,6 +593,7 @@ config GPIO_SX150X
config GPIO_STMPE
bool "STMPE GPIOs"
depends on MFD_STMPE
+ select GPIOLIB_IRQCHIP
help
This enables support for the GPIOs found on the STMPE I/O
Expanders.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d10f6a9d875a..5d024e396622 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -4,7 +4,9 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIO_DEVRES) += devres.o
obj-$(CONFIG_GPIOLIB) += gpiolib.o
+obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
+obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
# Device drivers. Generally keep list sorted alphabetically
@@ -20,6 +22,7 @@ obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o
obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o
obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
+obj-$(CONFIG_GPIO_CRYSTAL_COVE) += gpio-crystalcove.o
obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
obj-$(CONFIG_GPIO_DA9055) += gpio-da9055.o
obj-$(CONFIG_GPIO_DAVINCI) += gpio-davinci.o
@@ -101,3 +104,4 @@ obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o
obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
+obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 65978cf85f79..41b2f40578d5 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -39,47 +39,53 @@ static int devm_gpiod_match(struct device *dev, void *res, void *data)
* devm_gpiod_get - Resource-managed gpiod_get()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
+ * @flags: optional GPIO initialization flags
*
* Managed gpiod_get(). GPIO descriptors returned from this function are
* automatically disposed on driver detach. See gpiod_get() for detailed
* information about behavior and return values.
*/
-struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
- const char *con_id)
+struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
- return devm_gpiod_get_index(dev, con_id, 0);
+ return devm_gpiod_get_index(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL(devm_gpiod_get);
+EXPORT_SYMBOL(__devm_gpiod_get);
/**
* devm_gpiod_get_optional - Resource-managed gpiod_get_optional()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
+ * @flags: optional GPIO initialization flags
*
* Managed gpiod_get_optional(). GPIO descriptors returned from this function
* are automatically disposed on driver detach. See gpiod_get_optional() for
* detailed information about behavior and return values.
*/
-struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
- const char *con_id)
+struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
- return devm_gpiod_get_index_optional(dev, con_id, 0);
+ return devm_gpiod_get_index_optional(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL(devm_gpiod_get_optional);
+EXPORT_SYMBOL(__devm_gpiod_get_optional);
/**
* devm_gpiod_get_index - Resource-managed gpiod_get_index()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
+ * @flags: optional GPIO initialization flags
*
* Managed gpiod_get_index(). GPIO descriptors returned from this function are
* automatically disposed on driver detach. See gpiod_get_index() for detailed
* information about behavior and return values.
*/
-struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev,
const char *con_id,
- unsigned int idx)
+ unsigned int idx,
+ enum gpiod_flags flags)
{
struct gpio_desc **dr;
struct gpio_desc *desc;
@@ -89,7 +95,7 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
if (!dr)
return ERR_PTR(-ENOMEM);
- desc = gpiod_get_index(dev, con_id, idx);
+ desc = gpiod_get_index(dev, con_id, idx, flags);
if (IS_ERR(desc)) {
devres_free(dr);
return desc;
@@ -100,26 +106,28 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
return desc;
}
-EXPORT_SYMBOL(devm_gpiod_get_index);
+EXPORT_SYMBOL(__devm_gpiod_get_index);
/**
* devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional()
* @dev: GPIO consumer
* @con_id: function within the GPIO consumer
* @index: index of the GPIO to obtain in the consumer
+ * @flags: optional GPIO initialization flags
*
* Managed gpiod_get_index_optional(). GPIO descriptors returned from this
* function are automatically disposed on driver detach. See
* gpiod_get_index_optional() for detailed information about behavior and
* return values.
*/
-struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check __devm_gpiod_get_index_optional(struct device *dev,
const char *con_id,
- unsigned int index)
+ unsigned int index,
+ enum gpiod_flags flags)
{
struct gpio_desc *desc;
- desc = devm_gpiod_get_index(dev, con_id, index);
+ desc = devm_gpiod_get_index(dev, con_id, index, flags);
if (IS_ERR(desc)) {
if (PTR_ERR(desc) == -ENOENT)
return NULL;
@@ -127,7 +135,7 @@ struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
return desc;
}
-EXPORT_SYMBOL(devm_gpiod_get_index_optional);
+EXPORT_SYMBOL(__devm_gpiod_get_index_optional);
/**
* devm_gpiod_put - Resource-managed gpiod_put()
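
With the flags argument threaded through above, a consumer can request a descriptor and set its initial direction and level in a single call, instead of a follow-up gpiod_direction_output()/input() step. A minimal consumer sketch, using a hypothetical "reset" line:

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct gpio_desc *reset;

		/* Requested and driven low in one step */
		reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(reset))
			return PTR_ERR(reset);

		gpiod_set_value_cansleep(reset, 1);	/* assert reset */
		gpiod_set_value_cansleep(reset, 0);	/* release it */

		return 0;
	}

During this transition the extra argument is still optional for callers, which is why the exported symbols gain a double-underscore prefix while devm_gpiod_get() and friends remain available as wrappers.
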
diff --git a/drivers/gpio/gpio-74x164.c b/drivers/gpio/gpio-74x164.c
index e4ae29824c32..e3d968f751f1 100644
--- a/drivers/gpio/gpio-74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -167,13 +167,11 @@ exit_destroy:
static int gen_74x164_remove(struct spi_device *spi)
{
struct gen_74x164_chip *chip = spi_get_drvdata(spi);
- int ret;
- ret = gpiochip_remove(&chip->gpio_chip);
- if (!ret)
- mutex_destroy(&chip->lock);
+ gpiochip_remove(&chip->gpio_chip);
+ mutex_destroy(&chip->lock);
- return ret;
+ return 0;
}
static const struct of_device_id gen_74x164_dt_ids[] = {
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index b2239d678d01..416b2200d4f1 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -585,15 +585,8 @@ static int adnp_i2c_remove(struct i2c_client *client)
{
struct adnp *adnp = i2c_get_clientdata(client);
struct device_node *np = client->dev.of_node;
- int err;
-
- err = gpiochip_remove(&adnp->gpio);
- if (err < 0) {
- dev_err(&client->dev, "%s failed: %d\n", "gpiochip_remove()",
- err);
- return err;
- }
+ gpiochip_remove(&adnp->gpio);
if (of_find_property(np, "interrupt-controller", NULL))
adnp_irq_teardown(adnp);
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index f1ade8fa3218..b08bd169e568 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -167,15 +167,9 @@ err:
static int adp5520_gpio_remove(struct platform_device *pdev)
{
struct adp5520_gpio *dev;
- int ret;
dev = platform_get_drvdata(pdev);
- ret = gpiochip_remove(&dev->gpio_chip);
- if (ret) {
- dev_err(&pdev->dev, "%s failed, %d\n",
- "gpiochip_remove()", ret);
- return ret;
- }
+ gpiochip_remove(&dev->gpio_chip);
return 0;
}
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index ef19bc33f2bd..3beed6ea8c65 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -470,11 +470,7 @@ static int adp5588_gpio_remove(struct i2c_client *client)
if (dev->irq_base)
free_irq(dev->client->irq, dev);
- ret = gpiochip_remove(&dev->gpio_chip);
- if (ret) {
- dev_err(&client->dev, "gpiochip_remove failed %d\n", ret);
- return ret;
- }
+ gpiochip_remove(&dev->gpio_chip);
kfree(dev);
return 0;
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index 94e9992f8904..3c09f1a6872a 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -232,8 +232,7 @@ out:
static void __exit amd_gpio_exit(void)
{
- int err = gpiochip_remove(&gp.chip);
- WARN_ON(err);
+ gpiochip_remove(&gp.chip);
ioport_unmap(gp.pm);
release_region(gp.pmbase + PMBASE_OFFSET, PMBASE_SIZE);
}
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 29bdff558981..fe369f5c7fa6 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -149,7 +149,8 @@ static int arizona_gpio_remove(struct platform_device *pdev)
{
struct arizona_gpio *arizona_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&arizona_gpio->gpio_chip);
+ gpiochip_remove(&arizona_gpio->gpio_chip);
+ return 0;
}
static struct platform_driver arizona_gpio_driver = {
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
new file mode 100644
index 000000000000..934462f5bd22
--- /dev/null
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -0,0 +1,380 @@
+/*
+ * gpio-crystalcove.c - Intel Crystal Cove GPIO Driver
+ *
+ * Copyright (C) 2012, 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Yang, Bin <bin.yang@intel.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/seq_file.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/mfd/intel_soc_pmic.h>
+
+#define CRYSTALCOVE_GPIO_NUM 16
+
+#define UPDATE_IRQ_TYPE BIT(0)
+#define UPDATE_IRQ_MASK BIT(1)
+
+#define GPIO0IRQ 0x0b
+#define GPIO1IRQ 0x0c
+#define MGPIO0IRQS0 0x19
+#define MGPIO1IRQS0 0x1a
+#define MGPIO0IRQSX 0x1b
+#define MGPIO1IRQSX 0x1c
+#define GPIO0P0CTLO 0x2b
+#define GPIO0P0CTLI 0x33
+#define GPIO1P0CTLO 0x3b
+#define GPIO1P0CTLI 0x43
+
+#define CTLI_INTCNT_DIS (0)
+#define CTLI_INTCNT_NE (1 << 1)
+#define CTLI_INTCNT_PE (2 << 1)
+#define CTLI_INTCNT_BE (3 << 1)
+
+#define CTLO_DIR_IN (0)
+#define CTLO_DIR_OUT (1 << 5)
+
+#define CTLO_DRV_CMOS (0)
+#define CTLO_DRV_OD (1 << 4)
+
+#define CTLO_DRV_REN (1 << 3)
+
+#define CTLO_RVAL_2KDW (0)
+#define CTLO_RVAL_2KUP (1 << 1)
+#define CTLO_RVAL_50KDW (2 << 1)
+#define CTLO_RVAL_50KUP (3 << 1)
+
+#define CTLO_INPUT_SET (CTLO_DRV_CMOS | CTLO_DRV_REN | CTLO_RVAL_2KUP)
+#define CTLO_OUTPUT_SET (CTLO_DIR_OUT | CTLO_INPUT_SET)
+
+enum ctrl_register {
+ CTRL_IN,
+ CTRL_OUT,
+};
+
+/**
+ * struct crystalcove_gpio - Crystal Cove GPIO controller
+ * @buslock: for bus lock/sync and unlock.
+ * @chip: the abstract gpio_chip structure.
+ * @regmap: the regmap from the parent device.
+ * @update: pending IRQ setting update, to be written to the chip upon unlock.
+ * @intcnt_value: the Interrupt Detect value to be written.
+ * @set_irq_mask: true if the IRQ mask needs to be set, false to clear.
+ */
+struct crystalcove_gpio {
+ struct mutex buslock; /* irq_bus_lock */
+ struct gpio_chip chip;
+ struct regmap *regmap;
+ int update;
+ int intcnt_value;
+ bool set_irq_mask;
+};
+
+static inline struct crystalcove_gpio *to_cg(struct gpio_chip *gc)
+{
+ return container_of(gc, struct crystalcove_gpio, chip);
+}
+
+static inline int to_reg(int gpio, enum ctrl_register reg_type)
+{
+ int reg;
+
+ if (reg_type == CTRL_IN) {
+ if (gpio < 8)
+ reg = GPIO0P0CTLI;
+ else
+ reg = GPIO1P0CTLI;
+ } else {
+ if (gpio < 8)
+ reg = GPIO0P0CTLO;
+ else
+ reg = GPIO1P0CTLO;
+ }
+
+ return reg + gpio % 8;
+}
+
+static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg,
+ int gpio)
+{
+ u8 mirqs0 = gpio < 8 ? MGPIO0IRQS0 : MGPIO1IRQS0;
+ int mask = BIT(gpio % 8);
+
+ if (cg->set_irq_mask)
+ regmap_update_bits(cg->regmap, mirqs0, mask, mask);
+ else
+ regmap_update_bits(cg->regmap, mirqs0, mask, 0);
+}
+
+static void crystalcove_update_irq_ctrl(struct crystalcove_gpio *cg, int gpio)
+{
+ int reg = to_reg(gpio, CTRL_IN);
+
+ regmap_update_bits(cg->regmap, reg, CTLI_INTCNT_BE, cg->intcnt_value);
+}
+
+static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio)
+{
+ struct crystalcove_gpio *cg = to_cg(chip);
+
+ return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
+ CTLO_INPUT_SET);
+}
+
+static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio,
+ int value)
+{
+ struct crystalcove_gpio *cg = to_cg(chip);
+
+ return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT),
+ CTLO_OUTPUT_SET | value);
+}
+
+static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio)
+{
+ struct crystalcove_gpio *cg = to_cg(chip);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val);
+ if (ret)
+ return ret;
+
+ return val & 0x1;
+}
+
+static void crystalcove_gpio_set(struct gpio_chip *chip,
+ unsigned gpio, int value)
+{
+ struct crystalcove_gpio *cg = to_cg(chip);
+
+ if (value)
+ regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1);
+ else
+ regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0);
+}
+
+static int crystalcove_irq_type(struct irq_data *data, unsigned type)
+{
+ struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data));
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ cg->intcnt_value = CTLI_INTCNT_DIS;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ cg->intcnt_value = CTLI_INTCNT_BE;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ cg->intcnt_value = CTLI_INTCNT_PE;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ cg->intcnt_value = CTLI_INTCNT_NE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cg->update |= UPDATE_IRQ_TYPE;
+
+ return 0;
+}
+
+static void crystalcove_bus_lock(struct irq_data *data)
+{
+ struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data));
+
+ mutex_lock(&cg->buslock);
+}
+
+static void crystalcove_bus_sync_unlock(struct irq_data *data)
+{
+ struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data));
+ int gpio = data->hwirq;
+
+ if (cg->update & UPDATE_IRQ_TYPE)
+ crystalcove_update_irq_ctrl(cg, gpio);
+ if (cg->update & UPDATE_IRQ_MASK)
+ crystalcove_update_irq_mask(cg, gpio);
+ cg->update = 0;
+
+ mutex_unlock(&cg->buslock);
+}
+
+static void crystalcove_irq_unmask(struct irq_data *data)
+{
+ struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data));
+
+ cg->set_irq_mask = false;
+ cg->update |= UPDATE_IRQ_MASK;
+}
+
+static void crystalcove_irq_mask(struct irq_data *data)
+{
+ struct crystalcove_gpio *cg = to_cg(irq_data_get_irq_chip_data(data));
+
+ cg->set_irq_mask = true;
+ cg->update |= UPDATE_IRQ_MASK;
+}
+
+static struct irq_chip crystalcove_irqchip = {
+ .name = "Crystal Cove",
+ .irq_mask = crystalcove_irq_mask,
+ .irq_unmask = crystalcove_irq_unmask,
+ .irq_set_type = crystalcove_irq_type,
+ .irq_bus_lock = crystalcove_bus_lock,
+ .irq_bus_sync_unlock = crystalcove_bus_sync_unlock,
+};
+
+static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
+{
+ struct crystalcove_gpio *cg = data;
+ unsigned int p0, p1;
+ int pending;
+ int gpio;
+ unsigned int virq;
+
+ if (regmap_read(cg->regmap, GPIO0IRQ, &p0) ||
+ regmap_read(cg->regmap, GPIO1IRQ, &p1))
+ return IRQ_NONE;
+
+ regmap_write(cg->regmap, GPIO0IRQ, p0);
+ regmap_write(cg->regmap, GPIO1IRQ, p1);
+
+ pending = p0 | p1 << 8;
+
+ for (gpio = 0; gpio < cg->chip.ngpio; gpio++) {
+ if (pending & BIT(gpio)) {
+ virq = irq_find_mapping(cg->chip.irqdomain, gpio);
+ generic_handle_irq(virq);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void crystalcove_gpio_dbg_show(struct seq_file *s,
+ struct gpio_chip *chip)
+{
+ struct crystalcove_gpio *cg = to_cg(chip);
+ int gpio, offset;
+ unsigned int ctlo, ctli, mirqs0, mirqsx, irq;
+
+ for (gpio = 0; gpio < cg->chip.ngpio; gpio++) {
+ regmap_read(cg->regmap, to_reg(gpio, CTRL_OUT), &ctlo);
+ regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &ctli);
+ regmap_read(cg->regmap, gpio < 8 ? MGPIO0IRQS0 : MGPIO1IRQS0,
+ &mirqs0);
+ regmap_read(cg->regmap, gpio < 8 ? MGPIO0IRQSX : MGPIO1IRQSX,
+ &mirqsx);
+ regmap_read(cg->regmap, gpio < 8 ? GPIO0IRQ : GPIO1IRQ,
+ &irq);
+
+ offset = gpio % 8;
+ seq_printf(s, " gpio-%-2d %s %s %s %s ctlo=%2x,%s %s %s\n",
+ gpio, ctlo & CTLO_DIR_OUT ? "out" : "in ",
+ ctli & 0x1 ? "hi" : "lo",
+ ctli & CTLI_INTCNT_NE ? "fall" : " ",
+ ctli & CTLI_INTCNT_PE ? "rise" : " ",
+ ctlo,
+ mirqs0 & BIT(offset) ? "s0 mask " : "s0 unmask",
+ mirqsx & BIT(offset) ? "sx mask " : "sx unmask",
+ irq & BIT(offset) ? "pending" : " ");
+ }
+}
+
+static int crystalcove_gpio_probe(struct platform_device *pdev)
+{
+ int irq = platform_get_irq(pdev, 0);
+ struct crystalcove_gpio *cg;
+ int retval;
+ struct device *dev = pdev->dev.parent;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ if (irq < 0)
+ return irq;
+
+ cg = devm_kzalloc(&pdev->dev, sizeof(*cg), GFP_KERNEL);
+ if (!cg)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cg);
+
+ mutex_init(&cg->buslock);
+ cg->chip.label = KBUILD_MODNAME;
+ cg->chip.direction_input = crystalcove_gpio_dir_in;
+ cg->chip.direction_output = crystalcove_gpio_dir_out;
+ cg->chip.get = crystalcove_gpio_get;
+ cg->chip.set = crystalcove_gpio_set;
+ cg->chip.base = -1;
+ cg->chip.ngpio = CRYSTALCOVE_GPIO_NUM;
+ cg->chip.can_sleep = true;
+ cg->chip.dev = dev;
+ cg->chip.dbg_show = crystalcove_gpio_dbg_show;
+ cg->regmap = pmic->regmap;
+
+ retval = gpiochip_add(&cg->chip);
+ if (retval) {
+ dev_warn(&pdev->dev, "add gpio chip error: %d\n", retval);
+ return retval;
+ }
+
+ gpiochip_irqchip_add(&cg->chip, &crystalcove_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+
+ retval = request_threaded_irq(irq, NULL, crystalcove_gpio_irq_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, cg);
+
+ if (retval) {
+ dev_warn(&pdev->dev, "request irq failed: %d\n", retval);
+ goto out_remove_gpio;
+ }
+
+ return 0;
+
+out_remove_gpio:
+ WARN_ON(gpiochip_remove(&cg->chip));
+ return retval;
+}
+
+static int crystalcove_gpio_remove(struct platform_device *pdev)
+{
+ struct crystalcove_gpio *cg = platform_get_drvdata(pdev);
+ int irq = platform_get_irq(pdev, 0);
+ int err;
+
+ err = gpiochip_remove(&cg->chip);
+
+ if (irq >= 0)
+ free_irq(irq, cg);
+
+ return err;
+}
+
+static struct platform_driver crystalcove_gpio_driver = {
+ .probe = crystalcove_gpio_probe,
+ .remove = crystalcove_gpio_remove,
+ .driver = {
+ .name = "crystal_cove_gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(crystalcove_gpio_driver);
+
+MODULE_AUTHOR("Yang, Bin <bin.yang@intel.com>");
+MODULE_DESCRIPTION("Intel Crystal Cove GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index c0a3aeba6f21..92ec58fa9236 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -358,14 +358,8 @@ done:
static int cs5535_gpio_remove(struct platform_device *pdev)
{
struct resource *r;
- int err;
- err = gpiochip_remove(&cs5535_gpio_chip.chip);
- if (err) {
- /* uhh? */
- dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
- return err;
- }
+ gpiochip_remove(&cs5535_gpio_chip.chip);
r = platform_get_resource(pdev, IORESOURCE_IO, 0);
release_region(r->start, resource_size(r));
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 416cdf786b05..c5bccd4dec96 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -237,7 +237,8 @@ static int da9052_gpio_remove(struct platform_device *pdev)
{
struct da9052_gpio *gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&gpio->gp);
+ gpiochip_remove(&gpio->gp);
+ return 0;
}
static struct platform_driver da9052_gpio_driver = {
diff --git a/drivers/gpio/gpio-da9055.c b/drivers/gpio/gpio-da9055.c
index f992997bc301..9167c4331081 100644
--- a/drivers/gpio/gpio-da9055.c
+++ b/drivers/gpio/gpio-da9055.c
@@ -174,7 +174,8 @@ static int da9055_gpio_remove(struct platform_device *pdev)
{
struct da9055_gpio *gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&gpio->gp);
+ gpiochip_remove(&gpio->gp);
+ return 0;
}
static struct platform_driver da9055_gpio_driver = {
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index cd3b81435274..d6618a6e2399 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -359,7 +359,7 @@ static void dwapb_gpio_unregister(struct dwapb_gpio *gpio)
for (m = 0; m < gpio->nr_ports; ++m)
if (gpio->ports[m].is_registered)
- WARN_ON(gpiochip_remove(&gpio->ports[m].bgc.gc));
+ gpiochip_remove(&gpio->ports[m].bgc.gc);
}
static int dwapb_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index cde36054c387..fe49ec3cdb7d 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -409,11 +409,8 @@ err0:
static int em_gio_remove(struct platform_device *pdev)
{
struct em_gio_priv *p = platform_get_drvdata(pdev);
- int ret;
- ret = gpiochip_remove(&p->gpio_chip);
- if (ret)
- return ret;
+ gpiochip_remove(&p->gpio_chip);
irq_domain_remove(p->irq_domain);
return 0;
diff --git a/drivers/gpio/gpio-f7188x.c b/drivers/gpio/gpio-f7188x.c
index 8f73ee093739..fd3202f968ff 100644
--- a/drivers/gpio/gpio-f7188x.c
+++ b/drivers/gpio/gpio-f7188x.c
@@ -317,13 +317,7 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
err_gpiochip:
for (i = i - 1; i >= 0; i--) {
struct f7188x_gpio_bank *bank = &data->bank[i];
- int tmp;
-
- tmp = gpiochip_remove(&bank->chip);
- if (tmp < 0)
- dev_err(&pdev->dev,
- "Failed to remove gpiochip %d: %d\n",
- i, tmp);
+ gpiochip_remove(&bank->chip);
}
return err;
@@ -331,20 +325,12 @@ err_gpiochip:
static int f7188x_gpio_remove(struct platform_device *pdev)
{
- int err;
int i;
struct f7188x_gpio_data *data = platform_get_drvdata(pdev);
for (i = 0; i < data->nr_bank; i++) {
struct f7188x_gpio_bank *bank = &data->bank[i];
-
- err = gpiochip_remove(&bank->chip);
- if (err) {
- dev_err(&pdev->dev,
- "Failed to remove GPIO gpiochip %d: %d\n",
- i, err);
- return err;
- }
+ gpiochip_remove(&bank->chip);
}
return 0;
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index fea8c82bb8fc..16f6115e5bdb 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -398,7 +398,8 @@ static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
int bgpio_remove(struct bgpio_chip *bgc)
{
- return gpiochip_remove(&bgc->gc);
+ gpiochip_remove(&bgc->gc);
+ return 0;
}
EXPORT_SYMBOL_GPL(bgpio_remove);
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 3c3f515b7916..66ad3df9d9cf 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -468,9 +468,7 @@ static int grgpio_remove(struct platform_device *ofdev)
}
}
- ret = gpiochip_remove(&priv->bgc.gc);
- if (ret)
- goto out;
+ gpiochip_remove(&priv->bgc.gc);
if (priv->domain)
irq_domain_remove(priv->domain);
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
index 70304220a479..3784e81e7762 100644
--- a/drivers/gpio/gpio-ich.c
+++ b/drivers/gpio/gpio-ich.c
@@ -514,14 +514,7 @@ add_err:
static int ichx_gpio_remove(struct platform_device *pdev)
{
- int err;
-
- err = gpiochip_remove(&ichx_priv.chip);
- if (err) {
- dev_err(&pdev->dev, "%s failed, %d\n",
- "gpiochip_remove()", err);
- return err;
- }
+ gpiochip_remove(&ichx_priv.chip);
ichx_gpio_release_regions(ichx_priv.gpio_base, ichx_priv.use_gpio);
if (ichx_priv.pm_base)
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index 118a6bf455d9..aa28c65eb6b4 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -28,12 +28,10 @@
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/init.h>
-#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#include <linux/irqdomain.h>
#define INTEL_MID_IRQ_TYPE_EDGE (1 << 0)
#define INTEL_MID_IRQ_TYPE_LEVEL (1 << 1)
@@ -78,10 +76,12 @@ struct intel_mid_gpio {
void __iomem *reg_base;
spinlock_t lock;
struct pci_dev *pdev;
- struct irq_domain *domain;
};
-#define to_intel_gpio_priv(chip) container_of(chip, struct intel_mid_gpio, chip)
+static inline struct intel_mid_gpio *to_intel_gpio_priv(struct gpio_chip *gc)
+{
+ return container_of(gc, struct intel_mid_gpio, chip);
+}
static void __iomem *gpio_reg(struct gpio_chip *chip, unsigned offset,
enum GPIO_REG reg_type)
@@ -182,15 +182,10 @@ static int intel_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static int intel_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct intel_mid_gpio *priv = to_intel_gpio_priv(chip);
- return irq_create_mapping(priv->domain, offset);
-}
-
static int intel_mid_irq_type(struct irq_data *d, unsigned type)
{
- struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct intel_mid_gpio *priv = to_intel_gpio_priv(gc);
u32 gpio = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
@@ -231,33 +226,11 @@ static void intel_mid_irq_mask(struct irq_data *d)
{
}
-static int intel_mid_irq_reqres(struct irq_data *d)
-{
- struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);
-
- if (gpio_lock_as_irq(&priv->chip, irqd_to_hwirq(d))) {
- dev_err(priv->chip.dev,
- "unable to lock HW IRQ %lu for IRQ\n",
- irqd_to_hwirq(d));
- return -EINVAL;
- }
- return 0;
-}
-
-static void intel_mid_irq_relres(struct irq_data *d)
-{
- struct intel_mid_gpio *priv = irq_data_get_irq_chip_data(d);
-
- gpio_unlock_as_irq(&priv->chip, irqd_to_hwirq(d));
-}
-
static struct irq_chip intel_mid_irqchip = {
.name = "INTEL_MID-GPIO",
.irq_mask = intel_mid_irq_mask,
.irq_unmask = intel_mid_irq_unmask,
.irq_set_type = intel_mid_irq_type,
- .irq_request_resources = intel_mid_irq_reqres,
- .irq_release_resources = intel_mid_irq_relres,
};
static const struct intel_mid_gpio_ddata gpio_lincroft = {
@@ -330,8 +303,9 @@ MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc)
{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct intel_mid_gpio *priv = to_intel_gpio_priv(gc);
struct irq_data *data = irq_desc_get_irq_data(desc);
- struct intel_mid_gpio *priv = irq_data_get_irq_handler_data(data);
struct irq_chip *chip = irq_data_get_irq_chip(data);
u32 base, gpio, mask;
unsigned long pending;
@@ -345,7 +319,7 @@ static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc)
mask = BIT(gpio);
/* Clear before handling so we can't lose an edge */
writel(mask, gedr);
- generic_handle_irq(irq_find_mapping(priv->domain,
+ generic_handle_irq(irq_find_mapping(gc->irqdomain,
base + gpio));
}
}
@@ -371,23 +345,6 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
}
}
-static int intel_gpio_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct intel_mid_gpio *priv = d->host_data;
-
- irq_set_chip_and_handler(irq, &intel_mid_irqchip, handle_simple_irq);
- irq_set_chip_data(irq, priv);
- irq_set_irq_type(irq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static const struct irq_domain_ops intel_gpio_irq_ops = {
- .map = intel_gpio_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
static int intel_gpio_runtime_idle(struct device *dev)
{
int err = pm_schedule_suspend(dev, 500);
@@ -441,7 +398,6 @@ static int intel_gpio_probe(struct pci_dev *pdev,
priv->chip.direction_output = intel_gpio_direction_output;
priv->chip.get = intel_gpio_get;
priv->chip.set = intel_gpio_set;
- priv->chip.to_irq = intel_gpio_to_irq;
priv->chip.base = gpio_base;
priv->chip.ngpio = ddata->ngpio;
priv->chip.can_sleep = false;
@@ -449,11 +405,6 @@ static int intel_gpio_probe(struct pci_dev *pdev,
spin_lock_init(&priv->lock);
- priv->domain = irq_domain_add_simple(pdev->dev.of_node, ddata->ngpio,
- irq_base, &intel_gpio_irq_ops, priv);
- if (!priv->domain)
- return -ENOMEM;
-
pci_set_drvdata(pdev, priv);
retval = gpiochip_add(&priv->chip);
if (retval) {
@@ -461,10 +412,23 @@ static int intel_gpio_probe(struct pci_dev *pdev,
return retval;
}
+ retval = gpiochip_irqchip_add(&priv->chip,
+ &intel_mid_irqchip,
+ irq_base,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (retval) {
+ dev_err(&pdev->dev,
+ "could not connect irqchip to gpiochip\n");
+ return retval;
+ }
+
intel_mid_irq_init_hw(priv);
- irq_set_handler_data(pdev->irq, priv);
- irq_set_chained_handler(pdev->irq, intel_mid_irq_handler);
+ gpiochip_set_chained_irqchip(&priv->chip,
+ &intel_mid_irqchip,
+ pdev->irq,
+ intel_mid_irq_handler);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
diff --git a/drivers/gpio/gpio-it8761e.c b/drivers/gpio/gpio-it8761e.c
index 278b81317010..dadfc245cf09 100644
--- a/drivers/gpio/gpio-it8761e.c
+++ b/drivers/gpio/gpio-it8761e.c
@@ -217,11 +217,7 @@ gpiochip_add_err:
static void __exit it8761e_gpio_exit(void)
{
if (gpio_ba) {
- int ret = gpiochip_remove(&it8761e_gpio_chip);
-
- WARN(ret, "%s(): gpiochip_remove() failed, ret=%d\n",
- __func__, ret);
-
+ gpiochip_remove(&it8761e_gpio_chip);
release_region(gpio_ba, GPIO_IOSIZE);
gpio_ba = 0;
}
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 42852eaaf020..29ffe22ad97a 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -194,14 +194,8 @@ static int ttl_probe(struct platform_device *pdev)
static int ttl_remove(struct platform_device *pdev)
{
struct ttl_module *mod = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- int ret;
- ret = gpiochip_remove(&mod->gpio);
- if (ret) {
- dev_err(dev, "unable to remove GPIO chip\n");
- return ret;
- }
+ gpiochip_remove(&mod->gpio);
return 0;
}
diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c
index 1e5e51987d31..fd150adeebf9 100644
--- a/drivers/gpio/gpio-kempld.c
+++ b/drivers/gpio/gpio-kempld.c
@@ -199,7 +199,8 @@ static int kempld_gpio_remove(struct platform_device *pdev)
{
struct kempld_gpio_data *gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&gpio->chip);
+ gpiochip_remove(&gpio->chip);
+ return 0;
}
static struct platform_driver kempld_gpio_driver = {
diff --git a/drivers/gpio/gpio-lp3943.c b/drivers/gpio/gpio-lp3943.c
index a0341c92bcb4..6bbdad805b78 100644
--- a/drivers/gpio/gpio-lp3943.c
+++ b/drivers/gpio/gpio-lp3943.c
@@ -216,7 +216,8 @@ static int lp3943_gpio_remove(struct platform_device *pdev)
{
struct lp3943_gpio *lp3943_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&lp3943_gpio->chip);
+ gpiochip_remove(&lp3943_gpio->chip);
+ return 0;
}
static const struct of_device_id lp3943_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 225344d66404..b9b9799b368b 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -560,7 +560,7 @@ static int lpc32xx_gpio_probe(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id lpc32xx_gpio_of_match[] = {
+static const struct of_device_id lpc32xx_gpio_of_match[] = {
{ .compatible = "nxp,lpc3220-gpio", },
{ },
};
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 2bea89b72508..ff9eb911b5e4 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -25,9 +25,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/gpio.h>
-#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -62,7 +60,6 @@
struct lp_gpio {
struct gpio_chip chip;
- struct irq_domain *domain;
struct platform_device *pdev;
spinlock_t lock;
unsigned long reg_base;
@@ -151,7 +148,8 @@ static void lp_gpio_free(struct gpio_chip *chip, unsigned offset)
static int lp_irq_type(struct irq_data *d, unsigned type)
{
- struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct lp_gpio *lg = container_of(gc, struct lp_gpio, chip);
u32 hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
@@ -236,16 +234,11 @@ static int lp_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static int lp_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct lp_gpio *lg = container_of(chip, struct lp_gpio, chip);
- return irq_create_mapping(lg->domain, offset);
-}
-
static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
- struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct lp_gpio *lg = container_of(gc, struct lp_gpio, chip);
struct irq_chip *chip = irq_data_get_irq_chip(data);
u32 base, pin, mask;
unsigned long reg, ena, pending;
@@ -262,7 +255,7 @@ static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc)
mask = BIT(pin);
/* Clear before handling so we don't lose an edge */
outl(mask, reg);
- irq = irq_find_mapping(lg->domain, base + pin);
+ irq = irq_find_mapping(lg->chip.irqdomain, base + pin);
generic_handle_irq(irq);
}
}
@@ -279,7 +272,8 @@ static void lp_irq_mask(struct irq_data *d)
static void lp_irq_enable(struct irq_data *d)
{
- struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct lp_gpio *lg = container_of(gc, struct lp_gpio, chip);
u32 hwirq = irqd_to_hwirq(d);
unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
unsigned long flags;
@@ -291,7 +285,8 @@ static void lp_irq_enable(struct irq_data *d)
static void lp_irq_disable(struct irq_data *d)
{
- struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct lp_gpio *lg = container_of(gc, struct lp_gpio, chip);
u32 hwirq = irqd_to_hwirq(d);
unsigned long reg = lp_gpio_reg(&lg->chip, hwirq, LP_INT_ENABLE);
unsigned long flags;
@@ -301,26 +296,6 @@ static void lp_irq_disable(struct irq_data *d)
spin_unlock_irqrestore(&lg->lock, flags);
}
-static int lp_irq_reqres(struct irq_data *d)
-{
- struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
-
- if (gpio_lock_as_irq(&lg->chip, irqd_to_hwirq(d))) {
- dev_err(lg->chip.dev,
- "unable to lock HW IRQ %lu for IRQ\n",
- irqd_to_hwirq(d));
- return -EINVAL;
- }
- return 0;
-}
-
-static void lp_irq_relres(struct irq_data *d)
-{
- struct lp_gpio *lg = irq_data_get_irq_chip_data(d);
-
- gpio_unlock_as_irq(&lg->chip, irqd_to_hwirq(d));
-}
-
static struct irq_chip lp_irqchip = {
.name = "LP-GPIO",
.irq_mask = lp_irq_mask,
@@ -328,8 +303,6 @@ static struct irq_chip lp_irqchip = {
.irq_enable = lp_irq_enable,
.irq_disable = lp_irq_disable,
.irq_set_type = lp_irq_type,
- .irq_request_resources = lp_irq_reqres,
- .irq_release_resources = lp_irq_relres,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -348,22 +321,6 @@ static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
}
}
-static int lp_gpio_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct lp_gpio *lg = d->host_data;
-
- irq_set_chip_and_handler(irq, &lp_irqchip, handle_simple_irq);
- irq_set_chip_data(irq, lg);
- irq_set_irq_type(irq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static const struct irq_domain_ops lp_gpio_irq_ops = {
- .map = lp_gpio_irq_map,
-};
-
static int lp_gpio_probe(struct platform_device *pdev)
{
struct lp_gpio *lg;
@@ -371,7 +328,6 @@ static int lp_gpio_probe(struct platform_device *pdev)
struct resource *io_rc, *irq_rc;
struct device *dev = &pdev->dev;
unsigned long reg_len;
- unsigned hwirq;
int ret = -ENODEV;
lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL);
@@ -414,27 +370,28 @@ static int lp_gpio_probe(struct platform_device *pdev)
gc->can_sleep = false;
gc->dev = dev;
+ ret = gpiochip_add(gc);
+ if (ret) {
+ dev_err(dev, "failed adding lp-gpio chip\n");
+ return ret;
+ }
+
/* set up interrupts */
if (irq_rc && irq_rc->start) {
- hwirq = irq_rc->start;
- gc->to_irq = lp_gpio_to_irq;
-
- lg->domain = irq_domain_add_linear(NULL, LP_NUM_GPIO,
- &lp_gpio_irq_ops, lg);
- if (!lg->domain)
- return -ENXIO;
-
lp_gpio_irq_init_hw(lg);
+ ret = gpiochip_irqchip_add(gc, &lp_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "failed to add irqchip\n");
+ gpiochip_remove(gc);
+ return ret;
+ }
- irq_set_handler_data(hwirq, lg);
- irq_set_chained_handler(hwirq, lp_gpio_irq_handler);
+ gpiochip_set_chained_irqchip(gc, &lp_irqchip,
+ (unsigned)irq_rc->start,
+ lp_gpio_irq_handler);
}
- ret = gpiochip_add(gc);
- if (ret) {
- dev_err(dev, "failed adding lp-gpio chip\n");
- return ret;
- }
pm_runtime_enable(dev);
return 0;
@@ -465,11 +422,8 @@ MODULE_DEVICE_TABLE(acpi, lynxpoint_gpio_acpi_match);
static int lp_gpio_remove(struct platform_device *pdev)
{
struct lp_gpio *lg = platform_get_drvdata(pdev);
- int err;
pm_runtime_disable(&pdev->dev);
- err = gpiochip_remove(&lg->chip);
- if (err)
- dev_warn(&pdev->dev, "failed to remove gpio_chip.\n");
+ gpiochip_remove(&lg->chip);
return 0;
}
diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c
index 0814584fcdc1..18ab89e20806 100644
--- a/drivers/gpio/gpio-max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -228,21 +228,16 @@ EXPORT_SYMBOL_GPL(__max730x_probe);
int __max730x_remove(struct device *dev)
{
struct max7301 *ts = dev_get_drvdata(dev);
- int ret;
if (ts == NULL)
return -ENODEV;
/* Power down the chip and disable IRQ output */
ts->write(dev, 0x04, 0x00);
-
- ret = gpiochip_remove(&ts->chip);
- if (!ret)
- mutex_destroy(&ts->lock);
- else
- dev_err(dev, "Failed to remove GPIO controller: %d\n", ret);
-
- return ret;
+ gpiochip_remove(&ts->chip);
+ mutex_destroy(&ts->lock);
+ kfree(ts);
+ return 0;
}
EXPORT_SYMBOL_GPL(__max730x_remove);
diff --git a/drivers/gpio/gpio-max732x.c b/drivers/gpio/gpio-max732x.c
index 7c36f2b0983d..6c676225b886 100644
--- a/drivers/gpio/gpio-max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -676,12 +676,7 @@ static int max732x_remove(struct i2c_client *client)
}
}
- ret = gpiochip_remove(&chip->gpio_chip);
- if (ret) {
- dev_err(&client->dev, "%s failed, %d\n",
- "gpiochip_remove()", ret);
- return ret;
- }
+ gpiochip_remove(&chip->gpio_chip);
max732x_irq_teardown(chip);
diff --git a/drivers/gpio/gpio-mc33880.c b/drivers/gpio/gpio-mc33880.c
index 553a80a5eaf3..4e3e160e5db2 100644
--- a/drivers/gpio/gpio-mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -149,20 +149,15 @@ exit_destroy:
static int mc33880_remove(struct spi_device *spi)
{
struct mc33880 *mc;
- int ret;
mc = spi_get_drvdata(spi);
if (mc == NULL)
return -ENODEV;
- ret = gpiochip_remove(&mc->chip);
- if (!ret)
- mutex_destroy(&mc->lock);
- else
- dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
- ret);
+ gpiochip_remove(&mc->chip);
+ mutex_destroy(&mc->lock);
- return ret;
+ return 0;
}
static struct spi_driver mc33880_driver = {
diff --git a/drivers/gpio/gpio-mc9s08dz60.c b/drivers/gpio/gpio-mc9s08dz60.c
index dce35ff00db7..d62b4f8182bf 100644
--- a/drivers/gpio/gpio-mc9s08dz60.c
+++ b/drivers/gpio/gpio-mc9s08dz60.c
@@ -118,7 +118,8 @@ static int mc9s08dz60_remove(struct i2c_client *client)
mc9s = i2c_get_clientdata(client);
- return gpiochip_remove(&mc9s->chip);
+ gpiochip_remove(&mc9s->chip);
+ return 0;
}
static const struct i2c_device_id mc9s08dz60_id[] = {
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 57adbc90fdad..6f183d9b487e 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -812,16 +812,14 @@ fail:
static int mcp230xx_remove(struct i2c_client *client)
{
struct mcp23s08 *mcp = i2c_get_clientdata(client);
- int status;
if (client->irq && mcp->irq_controller)
mcp23s08_irq_teardown(mcp);
- status = gpiochip_remove(&mcp->chip);
- if (status == 0)
- kfree(mcp);
+ gpiochip_remove(&mcp->chip);
+ kfree(mcp);
- return status;
+ return 0;
}
static const struct i2c_device_id mcp230xx_id[] = {
@@ -960,13 +958,10 @@ static int mcp23s08_probe(struct spi_device *spi)
fail:
for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) {
- int tmp;
if (!data->mcp[addr])
continue;
- tmp = gpiochip_remove(&data->mcp[addr]->chip);
- if (tmp < 0)
- dev_err(&spi->dev, "%s --> %d\n", "remove", tmp);
+ gpiochip_remove(&data->mcp[addr]->chip);
}
kfree(data);
return status;
@@ -976,23 +971,16 @@ static int mcp23s08_remove(struct spi_device *spi)
{
struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
unsigned addr;
- int status = 0;
for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) {
- int tmp;
if (!data->mcp[addr])
continue;
- tmp = gpiochip_remove(&data->mcp[addr]->chip);
- if (tmp < 0) {
- dev_err(&spi->dev, "%s --> %d\n", "remove", tmp);
- status = tmp;
- }
+ gpiochip_remove(&data->mcp[addr]->chip);
}
- if (status == 0)
- kfree(data);
- return status;
+ kfree(data);
+ return 0;
}
static const struct spi_device_id mcp23s08_ids[] = {
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index d51329d23d38..5536108aa9db 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -497,8 +497,7 @@ err_irq_alloc_descs:
err_gpiochip_add:
while (--i >= 0) {
chip--;
- if (gpiochip_remove(&chip->gpio))
- dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i);
+ gpiochip_remove(&chip->gpio);
}
kfree(chip_save);
@@ -519,7 +518,6 @@ err_pci_enable:
static void ioh_gpio_remove(struct pci_dev *pdev)
{
- int err;
int i;
struct ioh_gpio *chip = pci_get_drvdata(pdev);
void *chip_save;
@@ -530,9 +528,7 @@ static void ioh_gpio_remove(struct pci_dev *pdev)
for (i = 0; i < 8; i++, chip++) {
irq_free_descs(chip->irq_base, num_ports[i]);
- err = gpiochip_remove(&chip->gpio);
- if (err)
- dev_err(&pdev->dev, "Failed gpiochip_remove\n");
+ gpiochip_remove(&chip->gpio);
}
chip = chip_save;
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index a3351acd4963..94f57670df9a 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -438,10 +438,7 @@ MODULE_DEVICE_TABLE(of, msm_gpio_of_match);
static int msm_gpio_remove(struct platform_device *dev)
{
- int ret = gpiochip_remove(&msm_gpio.gpio_chip);
-
- if (ret < 0)
- return ret;
+ gpiochip_remove(&msm_gpio.gpio_chip);
irq_set_handler(msm_gpio.summary_irq, NULL);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index db83b3c0a449..f4e54a92e04a 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -485,7 +485,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
out_irqdesc_free:
irq_free_descs(irq_base, 32);
out_gpiochip_remove:
- WARN_ON(gpiochip_remove(&port->bgc.gc) < 0);
+ gpiochip_remove(&port->bgc.gc);
out_bgpio_remove:
bgpio_remove(&port->bgc);
out_bgio:
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index dbb08546b9ec..5c5770c99c80 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -129,7 +129,8 @@ out:
static int octeon_gpio_remove(struct platform_device *pdev)
{
struct gpio_chip *chip = pdev->dev.platform_data;
- return gpiochip_remove(chip);
+ gpiochip_remove(chip);
+ return 0;
}
static struct of_device_id octeon_gpio_match[] = {
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 00f29aa1fb9d..174932165fcb 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -24,7 +24,6 @@
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/irqchip/chained_irq.h>
#include <linux/gpio.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>
@@ -89,18 +88,19 @@ struct gpio_bank {
#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))
-static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
+static int omap_irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
return bank->chip.base + gpio_irq;
}
-static inline struct gpio_bank *_irq_data_get_bank(struct irq_data *d)
+static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
return container_of(chip, struct gpio_bank, chip);
}
-static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
+static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
+ int is_input)
{
void __iomem *reg = bank->base;
u32 l;
@@ -117,7 +117,8 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
/* set data out value using dedicate set/clear register */
-static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
+static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, int gpio,
+ int enable)
{
void __iomem *reg = bank->base;
u32 l = GPIO_BIT(bank, gpio);
@@ -134,7 +135,8 @@ static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
}
/* set data out value using mask register */
-static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
+static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, int gpio,
+ int enable)
{
void __iomem *reg = bank->base + bank->regs->dataout;
u32 gpio_bit = GPIO_BIT(bank, gpio);
@@ -149,21 +151,21 @@ static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
bank->context.dataout = l;
}
-static int _get_gpio_datain(struct gpio_bank *bank, int offset)
+static int omap_get_gpio_datain(struct gpio_bank *bank, int offset)
{
void __iomem *reg = bank->base + bank->regs->datain;
return (readl_relaxed(reg) & (BIT(offset))) != 0;
}
-static int _get_gpio_dataout(struct gpio_bank *bank, int offset)
+static int omap_get_gpio_dataout(struct gpio_bank *bank, int offset)
{
void __iomem *reg = bank->base + bank->regs->dataout;
return (readl_relaxed(reg) & (BIT(offset))) != 0;
}
-static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
+static inline void omap_gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
int l = readl_relaxed(base + reg);
@@ -175,7 +177,7 @@ static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
writel_relaxed(l, base + reg);
}
-static inline void _gpio_dbck_enable(struct gpio_bank *bank)
+static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
if (bank->dbck_enable_mask && !bank->dbck_enabled) {
clk_prepare_enable(bank->dbck);
@@ -186,7 +188,7 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
}
}
-static inline void _gpio_dbck_disable(struct gpio_bank *bank)
+static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
if (bank->dbck_enable_mask && bank->dbck_enabled) {
/*
@@ -202,7 +204,7 @@ static inline void _gpio_dbck_disable(struct gpio_bank *bank)
}
/**
- * _set_gpio_debounce - low level gpio debounce time
+ * omap2_set_gpio_debounce - low level gpio debounce time
* @bank: the gpio bank we're acting upon
* @gpio: the gpio number on this @gpio
* @debounce: debounce time to use
@@ -210,8 +212,8 @@ static inline void _gpio_dbck_disable(struct gpio_bank *bank)
* OMAP's debounce time is in 31us steps so we need
* to convert and round up to the closest unit.
*/
-static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
- unsigned debounce)
+static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
+ unsigned debounce)
{
void __iomem *reg;
u32 val;
@@ -252,7 +254,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
* used within _gpio_dbck_enable() is still not initialized at
* that point. Therefore we have to enable dbck here.
*/
- _gpio_dbck_enable(bank);
+ omap_gpio_dbck_enable(bank);
if (bank->dbck_enable_mask) {
bank->context.debounce = debounce;
bank->context.debounce_en = val;
@@ -260,7 +262,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
}
/**
- * _clear_gpio_debounce - clear debounce settings for a gpio
+ * omap_clear_gpio_debounce - clear debounce settings for a gpio
* @bank: the gpio bank we're acting upon
* @gpio: the gpio number on this @gpio
*
@@ -269,7 +271,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
* time too. The debounce clock will also be disabled when calling this function
* if this is the only gpio in the bank using debounce.
*/
-static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
+static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
{
u32 gpio_bit = GPIO_BIT(bank, gpio);
@@ -293,20 +295,20 @@ static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
}
}
-static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
+static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
void __iomem *base = bank->base;
u32 gpio_bit = BIT(gpio);
- _gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_LOW);
- _gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
- trigger & IRQ_TYPE_LEVEL_HIGH);
- _gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
- trigger & IRQ_TYPE_EDGE_RISING);
- _gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
- trigger & IRQ_TYPE_EDGE_FALLING);
+ omap_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_LOW);
+ omap_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
+ trigger & IRQ_TYPE_LEVEL_HIGH);
+ omap_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_RISING);
+ omap_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
+ trigger & IRQ_TYPE_EDGE_FALLING);
bank->context.leveldetect0 =
readl_relaxed(bank->base + bank->regs->leveldetect0);
@@ -318,7 +320,7 @@ static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
readl_relaxed(bank->base + bank->regs->fallingdetect);
if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
- _gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
+ omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
bank->context.wake_en =
readl_relaxed(bank->base + bank->regs->wkup_en);
}
@@ -354,7 +356,7 @@ exit:
* This only applies to chips that can't do both rising and falling edge
* detection at once. For all other chips, this function is a noop.
*/
-static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
+static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
void __iomem *reg = bank->base;
u32 l = 0;
@@ -373,18 +375,18 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
writel_relaxed(l, reg);
}
#else
-static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
+static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif
-static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
- unsigned trigger)
+static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
+ unsigned trigger)
{
void __iomem *reg = bank->base;
void __iomem *base = bank->base;
u32 l = 0;
if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
- set_gpio_trigger(bank, gpio, trigger);
+ omap_set_gpio_trigger(bank, gpio, trigger);
} else if (bank->regs->irqctrl) {
reg += bank->regs->irqctrl;
@@ -414,7 +416,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
l |= BIT(gpio << 1);
/* Enable wake-up during idle for dynamic tick */
- _gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
+ omap_gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger);
bank->context.wake_en =
readl_relaxed(bank->base + bank->regs->wkup_en);
writel_relaxed(l, reg);
@@ -422,7 +424,7 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
return 0;
}
-static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
+static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
if (bank->regs->pinctrl) {
void __iomem *reg = bank->base + bank->regs->pinctrl;
@@ -443,7 +445,7 @@ static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset)
}
}
-static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
+static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
void __iomem *base = bank->base;
@@ -451,7 +453,7 @@ static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
!LINE_USED(bank->mod_usage, offset) &&
!LINE_USED(bank->irq_usage, offset)) {
/* Disable wake-up during idle for dynamic tick */
- _gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
+ omap_gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0);
bank->context.wake_en =
readl_relaxed(bank->base + bank->regs->wkup_en);
}
@@ -468,16 +470,16 @@ static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset)
}
}
-static int gpio_is_input(struct gpio_bank *bank, int mask)
+static int omap_gpio_is_input(struct gpio_bank *bank, int mask)
{
void __iomem *reg = bank->base + bank->regs->direction;
return readl_relaxed(reg) & mask;
}
-static int gpio_irq_type(struct irq_data *d, unsigned type)
+static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
- struct gpio_bank *bank = _irq_data_get_bank(d);
+ struct gpio_bank *bank = omap_irq_data_get_bank(d);
unsigned gpio = 0;
int retval;
unsigned long flags;
@@ -492,7 +494,7 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
#endif
if (!gpio)
- gpio = irq_to_gpio(bank, d->hwirq);
+ gpio = omap_irq_to_gpio(bank, d->hwirq);
if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
@@ -503,11 +505,11 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
spin_lock_irqsave(&bank->lock, flags);
offset = GPIO_INDEX(bank, gpio);
- retval = _set_gpio_triggering(bank, offset, type);
+ retval = omap_set_gpio_triggering(bank, offset, type);
if (!LINE_USED(bank->mod_usage, offset)) {
- _enable_gpio_module(bank, offset);
- _set_gpio_direction(bank, offset, 1);
- } else if (!gpio_is_input(bank, BIT(offset))) {
+ omap_enable_gpio_module(bank, offset);
+ omap_set_gpio_direction(bank, offset, 1);
+ } else if (!omap_gpio_is_input(bank, BIT(offset))) {
spin_unlock_irqrestore(&bank->lock, flags);
return -EINVAL;
}
@@ -523,7 +525,7 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
return retval;
}
-static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
void __iomem *reg = bank->base;
@@ -540,12 +542,12 @@ static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
readl_relaxed(reg);
}
-static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
+static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
- _clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
+ omap_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
-static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
+static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
void __iomem *reg = bank->base;
u32 l;
@@ -559,7 +561,7 @@ static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
return l;
}
-static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+static void omap_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
void __iomem *reg = bank->base;
u32 l;
@@ -581,7 +583,7 @@ static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
writel_relaxed(l, reg);
}
-static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+static void omap_disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
void __iomem *reg = bank->base;
u32 l;
@@ -603,12 +605,13 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
writel_relaxed(l, reg);
}
-static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
+static inline void omap_set_gpio_irqenable(struct gpio_bank *bank, int gpio,
+ int enable)
{
if (enable)
- _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
+ omap_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
else
- _disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
+ omap_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
/*
@@ -619,7 +622,7 @@ static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int ena
* enabled. When system is suspended, only selected GPIO interrupts need
* to have wake-up enabled.
*/
-static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
+static int omap_set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
u32 gpio_bit = GPIO_BIT(bank, gpio);
unsigned long flags;
@@ -642,22 +645,22 @@ static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
return 0;
}
-static void _reset_gpio(struct gpio_bank *bank, int gpio)
+static void omap_reset_gpio(struct gpio_bank *bank, int gpio)
{
- _set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
- _set_gpio_irqenable(bank, gpio, 0);
- _clear_gpio_irqstatus(bank, gpio);
- _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
- _clear_gpio_debounce(bank, gpio);
+ omap_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
+ omap_set_gpio_irqenable(bank, gpio, 0);
+ omap_clear_gpio_irqstatus(bank, gpio);
+ omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
+ omap_clear_gpio_debounce(bank, gpio);
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
-static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
+static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
- struct gpio_bank *bank = _irq_data_get_bank(d);
- unsigned int gpio = irq_to_gpio(bank, d->hwirq);
+ struct gpio_bank *bank = omap_irq_data_get_bank(d);
+ unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
- return _set_gpio_wakeup(bank, gpio, enable);
+ return omap_set_gpio_wakeup(bank, gpio, enable);
}
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
@@ -678,8 +681,8 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
* not already been requested.
*/
if (!LINE_USED(bank->irq_usage, offset)) {
- _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
- _enable_gpio_module(bank, offset);
+ omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
+ omap_enable_gpio_module(bank, offset);
}
bank->mod_usage |= BIT(offset);
spin_unlock_irqrestore(&bank->lock, flags);
@@ -694,8 +697,8 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&bank->lock, flags);
bank->mod_usage &= ~(BIT(offset));
- _disable_gpio_module(bank, offset);
- _reset_gpio(bank, bank->chip.base + offset);
+ omap_disable_gpio_module(bank, offset);
+ omap_reset_gpio(bank, bank->chip.base + offset);
spin_unlock_irqrestore(&bank->lock, flags);
/*
@@ -715,7 +718,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
* line's interrupt handler has been run, we may miss some nested
* interrupts.
*/
-static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
void __iomem *isr_reg = NULL;
u32 isr;
@@ -738,7 +741,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
u32 isr_saved, level_mask = 0;
u32 enabled;
- enabled = _get_gpio_irqbank_mask(bank);
+ enabled = omap_get_gpio_irqbank_mask(bank);
isr_saved = isr = readl_relaxed(isr_reg) & enabled;
if (bank->level_mask)
@@ -747,9 +750,9 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
/* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt occurred while
executing them */
- _disable_gpio_irqbank(bank, isr_saved & ~level_mask);
- _clear_gpio_irqbank(bank, isr_saved & ~level_mask);
- _enable_gpio_irqbank(bank, isr_saved & ~level_mask);
+ omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
+ omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
+ omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
/* if there is only edge sensitive GPIO pin interrupts
configured, we could unmask GPIO bank interrupt immediately */
@@ -773,7 +776,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
* This will be indicated in the bank toggle_mask.
*/
if (bank->toggle_mask & (BIT(bit)))
- _toggle_gpio_edge_triggering(bank, bit);
+ omap_toggle_gpio_edge_triggering(bank, bit);
generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
bit));
@@ -789,18 +792,18 @@ exit:
pm_runtime_put(bank->dev);
}
-static void gpio_irq_shutdown(struct irq_data *d)
+static void omap_gpio_irq_shutdown(struct irq_data *d)
{
- struct gpio_bank *bank = _irq_data_get_bank(d);
- unsigned int gpio = irq_to_gpio(bank, d->hwirq);
+ struct gpio_bank *bank = omap_irq_data_get_bank(d);
+ unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
unsigned long flags;
unsigned offset = GPIO_INDEX(bank, gpio);
spin_lock_irqsave(&bank->lock, flags);
gpio_unlock_as_irq(&bank->chip, offset);
bank->irq_usage &= ~(BIT(offset));
- _disable_gpio_module(bank, offset);
- _reset_gpio(bank, gpio);
+ omap_disable_gpio_module(bank, offset);
+ omap_reset_gpio(bank, gpio);
spin_unlock_irqrestore(&bank->lock, flags);
/*
@@ -811,57 +814,57 @@ static void gpio_irq_shutdown(struct irq_data *d)
pm_runtime_put(bank->dev);
}
-static void gpio_ack_irq(struct irq_data *d)
+static void omap_gpio_ack_irq(struct irq_data *d)
{
- struct gpio_bank *bank = _irq_data_get_bank(d);
- unsigned int gpio = irq_to_gpio(bank, d->hwirq);
+ struct gpio_bank *bank = omap_irq_data_get_bank(d);
+ unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
- _clear_gpio_irqstatus(bank, gpio);
+ omap_clear_gpio_irqstatus(bank, gpio);
}
-static void gpio_mask_irq(struct irq_data *d)
+static void omap_gpio_mask_irq(struct irq_data *d)
{
- struct gpio_bank *bank = _irq_data_get_bank(d);
- unsigned int gpio = irq_to_gpio(bank, d->hwirq);
+ struct gpio_bank *bank = omap_irq_data_get_bank(d);
+ unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
- _set_gpio_irqenable(bank, gpio, 0);
- _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
+ omap_set_gpio_irqenable(bank, gpio, 0);
+ omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
spin_unlock_irqrestore(&bank->lock, flags);
}
-static void gpio_unmask_irq(struct irq_data *d)
+static void omap_gpio_unmask_irq(struct irq_data *d)
{
- struct gpio_bank *bank = _irq_data_get_bank(d);
- unsigned int gpio = irq_to_gpio(bank, d->hwirq);
+ struct gpio_bank *bank = omap_irq_data_get_bank(d);
+ unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
unsigned int irq_mask = GPIO_BIT(bank, gpio);
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
if (trigger)
- _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
+ omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
/* For level-triggered GPIOs, the clearing must be done after
* the HW source is cleared, thus after the handler has run */
if (bank->level_mask & irq_mask) {
- _set_gpio_irqenable(bank, gpio, 0);
- _clear_gpio_irqstatus(bank, gpio);
+ omap_set_gpio_irqenable(bank, gpio, 0);
+ omap_clear_gpio_irqstatus(bank, gpio);
}
- _set_gpio_irqenable(bank, gpio, 1);
+ omap_set_gpio_irqenable(bank, gpio, 1);
spin_unlock_irqrestore(&bank->lock, flags);
}
static struct irq_chip gpio_irq_chip = {
.name = "GPIO",
- .irq_shutdown = gpio_irq_shutdown,
- .irq_ack = gpio_ack_irq,
- .irq_mask = gpio_mask_irq,
- .irq_unmask = gpio_unmask_irq,
- .irq_set_type = gpio_irq_type,
- .irq_set_wake = gpio_wake_enable,
+ .irq_shutdown = omap_gpio_irq_shutdown,
+ .irq_ack = omap_gpio_ack_irq,
+ .irq_mask = omap_gpio_mask_irq,
+ .irq_unmask = omap_gpio_unmask_irq,
+ .irq_set_type = omap_gpio_irq_type,
+ .irq_set_wake = omap_gpio_wake_enable,
};
/*---------------------------------------------------------------------*/
@@ -918,7 +921,7 @@ static struct platform_device omap_mpuio_device = {
/* could list the /proc/iomem resources */
};
-static inline void mpuio_init(struct gpio_bank *bank)
+static inline void omap_mpuio_init(struct gpio_bank *bank)
{
platform_set_drvdata(&omap_mpuio_device, bank);
@@ -928,7 +931,7 @@ static inline void mpuio_init(struct gpio_bank *bank)
/*---------------------------------------------------------------------*/
-static int gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank;
unsigned long flags;
@@ -943,19 +946,19 @@ static int gpio_get_direction(struct gpio_chip *chip, unsigned offset)
return dir;
}
-static int gpio_input(struct gpio_chip *chip, unsigned offset)
+static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank;
unsigned long flags;
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
- _set_gpio_direction(bank, offset, 1);
+ omap_set_gpio_direction(bank, offset, 1);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
-static int gpio_get(struct gpio_chip *chip, unsigned offset)
+static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct gpio_bank *bank;
u32 mask;
@@ -963,13 +966,13 @@ static int gpio_get(struct gpio_chip *chip, unsigned offset)
bank = container_of(chip, struct gpio_bank, chip);
mask = (BIT(offset));
- if (gpio_is_input(bank, mask))
- return _get_gpio_datain(bank, offset);
+ if (omap_gpio_is_input(bank, mask))
+ return omap_get_gpio_datain(bank, offset);
else
- return _get_gpio_dataout(bank, offset);
+ return omap_get_gpio_dataout(bank, offset);
}
-static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
+static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_bank *bank;
unsigned long flags;
@@ -977,13 +980,13 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
bank->set_dataout(bank, offset, value);
- _set_gpio_direction(bank, offset, 0);
+ omap_set_gpio_direction(bank, offset, 0);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
-static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
- unsigned debounce)
+static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
+ unsigned debounce)
{
struct gpio_bank *bank;
unsigned long flags;
@@ -991,13 +994,13 @@ static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
- _set_gpio_debounce(bank, offset, debounce);
+ omap2_set_gpio_debounce(bank, offset, debounce);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
}
-static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_bank *bank;
unsigned long flags;
@@ -1025,11 +1028,6 @@ static void __init omap_gpio_show_rev(struct gpio_bank *bank)
called = true;
}
-/* This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-
static void omap_gpio_mod_init(struct gpio_bank *bank)
{
void __iomem *base = bank->base;
@@ -1043,8 +1041,10 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
return;
}
- _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
- _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
+ omap_gpio_rmw(base, bank->regs->irqenable, l,
+ bank->regs->irqenable_inv);
+ omap_gpio_rmw(base, bank->regs->irqstatus, l,
+ !bank->regs->irqenable_inv);
if (bank->regs->debounce_en)
writel_relaxed(0, base + bank->regs->debounce_en);
@@ -1078,10 +1078,10 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
/* NOTE: No ack required, reading IRQ status clears it. */
ct->chip.irq_mask = irq_gc_mask_set_bit;
ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- ct->chip.irq_set_type = gpio_irq_type;
+ ct->chip.irq_set_type = omap_gpio_irq_type;
if (bank->regs->wkup_en)
- ct->chip.irq_set_wake = gpio_wake_enable;
+ ct->chip.irq_set_wake = omap_gpio_wake_enable;
ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
@@ -1101,12 +1101,12 @@ static int omap_gpio_chip_init(struct gpio_bank *bank)
*/
bank->chip.request = omap_gpio_request;
bank->chip.free = omap_gpio_free;
- bank->chip.get_direction = gpio_get_direction;
- bank->chip.direction_input = gpio_input;
- bank->chip.get = gpio_get;
- bank->chip.direction_output = gpio_output;
- bank->chip.set_debounce = gpio_debounce;
- bank->chip.set = gpio_set;
+ bank->chip.get_direction = omap_gpio_get_direction;
+ bank->chip.direction_input = omap_gpio_input;
+ bank->chip.get = omap_gpio_get;
+ bank->chip.direction_output = omap_gpio_output;
+ bank->chip.set_debounce = omap_gpio_debounce;
+ bank->chip.set = omap_gpio_set;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
@@ -1138,7 +1138,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank)
#endif
ret = gpiochip_irqchip_add(&bank->chip, &gpio_irq_chip,
- irq_base, gpio_irq_handler,
+ irq_base, omap_gpio_irq_handler,
IRQ_TYPE_NONE);
if (ret) {
@@ -1148,11 +1148,10 @@ static int omap_gpio_chip_init(struct gpio_bank *bank)
}
gpiochip_set_chained_irqchip(&bank->chip, &gpio_irq_chip,
- bank->irq, gpio_irq_handler);
+ bank->irq, omap_gpio_irq_handler);
for (j = 0; j < bank->width; j++) {
int irq = irq_find_mapping(bank->chip.irqdomain, j);
- irq_set_lockdep_class(irq, &gpio_lock_class);
if (bank->is_mpuio) {
omap_mpuio_alloc_gc(bank, irq, bank->width);
irq_set_chip_and_handler(irq, NULL, NULL);
@@ -1217,9 +1216,9 @@ static int omap_gpio_probe(struct platform_device *pdev)
}
if (bank->regs->set_dataout && bank->regs->clr_dataout)
- bank->set_dataout = _set_gpio_dataout_reg;
+ bank->set_dataout = omap_set_gpio_dataout_reg;
else
- bank->set_dataout = _set_gpio_dataout_mask;
+ bank->set_dataout = omap_set_gpio_dataout_mask;
spin_lock_init(&bank->lock);
@@ -1238,7 +1237,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
pm_runtime_get_sync(bank->dev);
if (bank->is_mpuio)
- mpuio_init(bank);
+ omap_mpuio_init(bank);
omap_gpio_mod_init(bank);
@@ -1320,7 +1319,7 @@ update_gpio_context_count:
bank->context_loss_count =
bank->get_context_loss_count(bank->dev);
- _gpio_dbck_disable(bank);
+ omap_gpio_dbck_disable(bank);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -1351,7 +1350,7 @@ static int omap_gpio_runtime_resume(struct device *dev)
bank->get_context_loss_count(bank->dev);
}
- _gpio_dbck_enable(bank);
+ omap_gpio_dbck_enable(bank);
/*
* In ->runtime_suspend(), level-triggered, wakeup-enabled
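
Beyond the omap_ symbol prefixing, the OMAP hunks above route interrupt setup through the gpiolib irqchip helpers: gpiochip_irqchip_add() creates the per-pin mappings and gpiochip_set_chained_irqchip() cascades the bank's summary interrupt into the demux handler. A rough sketch of that registration sequence, with hypothetical names and handle_simple_irq assumed as the default flow handler (the OMAP driver passes its own handler and a legacy irq base instead):

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static struct irq_chip foo_irq_chip = {
	.name = "foo-gpio",	/* mask/unmask/set_type callbacks omitted in this sketch */
};

static void foo_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	/* demux: read the bank's status register and call
	 * generic_handle_irq() for each pending child interrupt */
}

static int foo_gpio_setup_irq(struct gpio_chip *chip, int parent_irq)
{
	int ret;

	/* Install foo_irq_chip for every pin; IRQ_TYPE_NONE leaves the
	 * trigger type to be set later by irq_set_irq_type()/request_irq(). */
	ret = gpiochip_irqchip_add(chip, &foo_irq_chip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret)
		return ret;

	/* Hook the bank's upstream (parent) interrupt to the demux handler. */
	gpiochip_set_chained_irqchip(chip, &foo_irq_chip, parent_irq,
				     foo_gpio_irq_handler);
	return 0;
}
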
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 86bdbe362068..171a6389f9ce 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -210,7 +210,8 @@ static int palmas_gpio_remove(struct platform_device *pdev)
{
struct palmas_gpio *palmas_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&palmas_gpio->gpio_chip);
+ gpiochip_remove(&palmas_gpio->gpio_chip);
+ return 0;
}
static struct platform_driver palmas_gpio_driver = {
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index e721a37c3473..f9961eea2120 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -765,12 +765,7 @@ static int pca953x_remove(struct i2c_client *client)
}
}
- ret = gpiochip_remove(&chip->gpio_chip);
- if (ret) {
- dev_err(&client->dev, "%s failed, %d\n",
- "gpiochip_remove()", ret);
- return ret;
- }
+ gpiochip_remove(&chip->gpio_chip);
return 0;
}
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 27b46751ea7e..236708ad0a5b 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -444,9 +444,7 @@ static int pcf857x_remove(struct i2c_client *client)
if (client->irq)
pcf857x_irq_domain_cleanup(gpio);
- status = gpiochip_remove(&gpio->chip);
- if (status)
- dev_err(&client->dev, "%s --> %d\n", "remove", status);
+ gpiochip_remove(&gpio->chip);
return status;
}
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index d6eac9b17db9..e0ac549dccb5 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -426,9 +426,7 @@ end:
err_request_irq:
irq_free_descs(irq_base, gpio_pins[chip->ioh]);
-
- if (gpiochip_remove(&chip->gpio))
- dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
+ gpiochip_remove(&chip->gpio);
err_gpiochip_add:
pci_iounmap(pdev, chip->base);
@@ -447,7 +445,6 @@ err_pci_enable:
static void pch_gpio_remove(struct pci_dev *pdev)
{
- int err;
struct pch_gpio *chip = pci_get_drvdata(pdev);
if (chip->irq_base != -1) {
@@ -456,10 +453,7 @@ static void pch_gpio_remove(struct pci_dev *pdev)
irq_free_descs(chip->irq_base, gpio_pins[chip->ioh]);
}
- err = gpiochip_remove(&chip->gpio);
- if (err)
- dev_err(&pdev->dev, "Failed gpiochip_remove\n");
-
+ gpiochip_remove(&chip->gpio);
pci_iounmap(pdev, chip->base);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 42e6e64f2120..ad3feec0075e 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -498,7 +498,7 @@ static int pxa_gpio_nums(struct platform_device *pdev)
}
#ifdef CONFIG_OF
-static struct of_device_id pxa_gpio_dt_ids[] = {
+static const struct of_device_id pxa_gpio_dt_ids[] = {
{ .compatible = "intel,pxa25x-gpio", .data = &pxa25x_id, },
{ .compatible = "intel,pxa26x-gpio", .data = &pxa26x_id, },
{ .compatible = "intel,pxa27x-gpio", .data = &pxa27x_id, },
@@ -649,6 +649,11 @@ static int pxa_gpio_probe(struct platform_device *pdev)
handle_edge_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
+ } else {
+ if (irq0 > 0)
+ irq_set_chained_handler(irq0, pxa_gpio_demux_handler);
+ if (irq1 > 0)
+ irq_set_chained_handler(irq1, pxa_gpio_demux_handler);
}
irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index 562b0c4d9cc8..769233d2da6d 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -148,7 +148,8 @@ static int rc5t583_gpio_remove(struct platform_device *pdev)
{
struct rc5t583_gpio *rc5t583_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&rc5t583_gpio->gpio_chip);
+ gpiochip_remove(&rc5t583_gpio->gpio_chip);
+ return 0;
}
static struct platform_driver rc5t583_gpio_driver = {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index b6ae89ea8811..bf6c09450fee 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -240,9 +240,9 @@ static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
/* testing on r8a7790 shows that INDT does not show correct pin state
* when configured as output, so use OUTDT in case of output pins */
if (gpio_rcar_read(gpio_to_priv(chip), INOUTSEL) & bit)
- return (int)(gpio_rcar_read(gpio_to_priv(chip), OUTDT) & bit);
+ return !!(gpio_rcar_read(gpio_to_priv(chip), OUTDT) & bit);
else
- return (int)(gpio_rcar_read(gpio_to_priv(chip), INDT) & bit);
+ return !!(gpio_rcar_read(gpio_to_priv(chip), INDT) & bit);
}
static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -472,11 +472,8 @@ err0:
static int gpio_rcar_remove(struct platform_device *pdev)
{
struct gpio_rcar_priv *p = platform_get_drvdata(pdev);
- int ret;
- ret = gpiochip_remove(&p->gpio_chip);
- if (ret)
- return ret;
+ gpiochip_remove(&p->gpio_chip);
irq_domain_remove(p->irq_domain);
pm_runtime_put(&pdev->dev);
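
The gpio-rcar hunk above swaps an (int) cast for the !! idiom so the .get() callback returns a strict 0/1 value rather than the raw masked bit; casting a masked 32-bit register value to int does not give that, and for bit 31 the result is typically a large negative number on two's-complement targets. A standalone demonstration (plain userspace C, not driver code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = UINT32_C(1) << 31;	/* pin 31 reads back as set */
	uint32_t bit = UINT32_C(1) << 31;

	/* old style: the masked value squeezed into an int */
	printf("(int) cast: %d\n", (int)(reg & bit));	/* typically -2147483648 */
	/* new style: normalized to exactly 0 or 1 */
	printf("!! idiom  : %d\n", !!(reg & bit));	/* always 1 here */
	return 0;
}
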
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index 9fa7e53331c9..d729bc8a554d 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -199,14 +199,11 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
static int rdc321x_gpio_remove(struct platform_device *pdev)
{
- int ret;
struct rdc321x_gpio *rdc321x_gpio_dev = platform_get_drvdata(pdev);
- ret = gpiochip_remove(&rdc321x_gpio_dev->chip);
- if (ret)
- dev_err(&pdev->dev, "failed to unregister chip\n");
+ gpiochip_remove(&rdc321x_gpio_dev->chip);
- return ret;
+ return 0;
}
static struct platform_driver rdc321x_gpio_driver = {
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 07105ee5c9ae..3810da47043f 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -32,10 +32,7 @@
#include <mach/map.h>
#include <mach/regs-gpio.h>
-
-#if defined(CONFIG_ARCH_S3C24XX) || defined(CONFIG_ARCH_S3C64XX)
#include <mach/gpio-samsung.h>
-#endif
#include <plat/cpu.h>
#include <plat/gpio-core.h>
@@ -358,47 +355,6 @@ static unsigned s3c24xx_gpio_getcfg_abank(struct samsung_gpio_chip *chip,
}
#endif
-#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450)
-static int s5p64x0_gpio_setcfg_rbank(struct samsung_gpio_chip *chip,
- unsigned int off, unsigned int cfg)
-{
- void __iomem *reg = chip->base;
- unsigned int shift;
- u32 con;
-
- switch (off) {
- case 0:
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- shift = (off & 7) * 4;
- reg -= 4;
- break;
- case 6:
- shift = ((off + 1) & 7) * 4;
- reg -= 4;
- break;
- default:
- shift = ((off + 1) & 7) * 4;
- break;
- }
-
- if (samsung_gpio_is_cfg_special(cfg)) {
- cfg &= 0xf;
- cfg <<= shift;
- }
-
- con = __raw_readl(reg);
- con &= ~(0xf << shift);
- con |= cfg;
- __raw_writel(con, reg);
-
- return 0;
-}
-#endif
-
static void __init samsung_gpiolib_set_cfg(struct samsung_gpio_cfg *chipcfg,
int nr_chips)
{
@@ -426,16 +382,6 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = {
};
#endif
-#if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450)
-static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {
- .cfg_eint = 0x3,
- .set_config = s5p64x0_gpio_setcfg_rbank,
- .get_config = samsung_gpio_getcfg_4bit,
- .set_pull = samsung_gpio_setpull_updown,
- .get_pull = samsung_gpio_getpull_updown,
-};
-#endif
-
static struct samsung_gpio_cfg samsung_gpio_cfgs[] = {
[0] = {
.cfg_eint = 0x0,
@@ -708,91 +654,6 @@ static int s3c24xx_gpiolib_banka_output(struct gpio_chip *chip,
}
#endif
-/* The next set of routines are for the case of s5p64x0 bank r */
-
-static int s5p64x0_gpiolib_rbank_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
- void __iomem *base = ourchip->base;
- void __iomem *regcon = base;
- unsigned long con;
- unsigned long flags;
-
- switch (offset) {
- case 6:
- offset += 1;
- case 0:
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- regcon -= 4;
- break;
- default:
- offset -= 7;
- break;
- }
-
- samsung_gpio_lock(ourchip, flags);
-
- con = __raw_readl(regcon);
- con &= ~(0xf << con_4bit_shift(offset));
- __raw_writel(con, regcon);
-
- samsung_gpio_unlock(ourchip, flags);
-
- return 0;
-}
-
-static int s5p64x0_gpiolib_rbank_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- struct samsung_gpio_chip *ourchip = to_samsung_gpio(chip);
- void __iomem *base = ourchip->base;
- void __iomem *regcon = base;
- unsigned long con;
- unsigned long dat;
- unsigned long flags;
- unsigned con_offset = offset;
-
- switch (con_offset) {
- case 6:
- con_offset += 1;
- case 0:
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- regcon -= 4;
- break;
- default:
- con_offset -= 7;
- break;
- }
-
- samsung_gpio_lock(ourchip, flags);
-
- con = __raw_readl(regcon);
- con &= ~(0xf << con_4bit_shift(con_offset));
- con |= 0x1 << con_4bit_shift(con_offset);
-
- dat = __raw_readl(base + GPIODAT_OFF);
- if (value)
- dat |= 1 << offset;
- else
- dat &= ~(1 << offset);
-
- __raw_writel(con, regcon);
- __raw_writel(dat, base + GPIODAT_OFF);
-
- samsung_gpio_unlock(ourchip, flags);
-
- return 0;
-}
-
static void samsung_gpiolib_set(struct gpio_chip *chip,
unsigned offset, int value)
{
@@ -999,20 +860,6 @@ static void __init samsung_gpiolib_add_4bit2_chips(struct samsung_gpio_chip *chi
}
}
-static void __init s5p64x0_gpiolib_add_rbank(struct samsung_gpio_chip *chip,
- int nr_chips)
-{
- for (; nr_chips > 0; nr_chips--, chip++) {
- chip->chip.direction_input = s5p64x0_gpiolib_rbank_input;
- chip->chip.direction_output = s5p64x0_gpiolib_rbank_output;
-
- if (!chip->pm)
- chip->pm = __gpio_pm(&samsung_gpio_pm_4bit);
-
- samsung_gpiolib_add(chip);
- }
-}
-
int samsung_gpiolib_to_irq(struct gpio_chip *chip, unsigned int offset)
{
struct samsung_gpio_chip *samsung_chip = container_of(chip, struct samsung_gpio_chip, chip);
@@ -1319,773 +1166,9 @@ static struct samsung_gpio_chip s3c64xx_gpios_2bit[] = {
#endif
};
-/*
- * S5P6440 GPIO bank summary:
- *
- * Bank GPIOs Style SlpCon ExtInt Group
- * A 6 4Bit Yes 1
- * B 7 4Bit Yes 1
- * C 8 4Bit Yes 2
- * F 2 2Bit Yes 4 [1]
- * G 7 4Bit Yes 5
- * H 10 4Bit[2] Yes 6
- * I 16 2Bit Yes None
- * J 12 2Bit Yes None
- * N 16 2Bit No IRQ_EINT
- * P 8 2Bit Yes 8
- * R 15 4Bit[2] Yes 8
- */
-
-static struct samsung_gpio_chip s5p6440_gpios_4bit[] = {
-#ifdef CONFIG_CPU_S5P6440
- {
- .chip = {
- .base = S5P6440_GPA(0),
- .ngpio = S5P6440_GPIO_A_NR,
- .label = "GPA",
- },
- }, {
- .chip = {
- .base = S5P6440_GPB(0),
- .ngpio = S5P6440_GPIO_B_NR,
- .label = "GPB",
- },
- }, {
- .chip = {
- .base = S5P6440_GPC(0),
- .ngpio = S5P6440_GPIO_C_NR,
- .label = "GPC",
- },
- }, {
- .base = S5P64X0_GPG_BASE,
- .chip = {
- .base = S5P6440_GPG(0),
- .ngpio = S5P6440_GPIO_G_NR,
- .label = "GPG",
- },
- },
-#endif
-};
-
-static struct samsung_gpio_chip s5p6440_gpios_4bit2[] = {
-#ifdef CONFIG_CPU_S5P6440
- {
- .base = S5P64X0_GPH_BASE + 0x4,
- .chip = {
- .base = S5P6440_GPH(0),
- .ngpio = S5P6440_GPIO_H_NR,
- .label = "GPH",
- },
- },
-#endif
-};
-
-static struct samsung_gpio_chip s5p6440_gpios_rbank[] = {
-#ifdef CONFIG_CPU_S5P6440
- {
- .base = S5P64X0_GPR_BASE + 0x4,
- .config = &s5p64x0_gpio_cfg_rbank,
- .chip = {
- .base = S5P6440_GPR(0),
- .ngpio = S5P6440_GPIO_R_NR,
- .label = "GPR",
- },
- },
-#endif
-};
-
-static struct samsung_gpio_chip s5p6440_gpios_2bit[] = {
-#ifdef CONFIG_CPU_S5P6440
- {
- .base = S5P64X0_GPF_BASE,
- .config = &samsung_gpio_cfgs[6],
- .chip = {
- .base = S5P6440_GPF(0),
- .ngpio = S5P6440_GPIO_F_NR,
- .label = "GPF",
- },
- }, {
- .base = S5P64X0_GPI_BASE,
- .config = &samsung_gpio_cfgs[4],
- .chip = {
- .base = S5P6440_GPI(0),
- .ngpio = S5P6440_GPIO_I_NR,
- .label = "GPI",
- },
- }, {
- .base = S5P64X0_GPJ_BASE,
- .config = &samsung_gpio_cfgs[4],
- .chip = {
- .base = S5P6440_GPJ(0),
- .ngpio = S5P6440_GPIO_J_NR,
- .label = "GPJ",
- },
- }, {
- .base = S5P64X0_GPN_BASE,
- .config = &samsung_gpio_cfgs[5],
- .chip = {
- .base = S5P6440_GPN(0),
- .ngpio = S5P6440_GPIO_N_NR,
- .label = "GPN",
- },
- }, {
- .base = S5P64X0_GPP_BASE,
- .config = &samsung_gpio_cfgs[6],
- .chip = {
- .base = S5P6440_GPP(0),
- .ngpio = S5P6440_GPIO_P_NR,
- .label = "GPP",
- },
- },
-#endif
-};
-
-/*
- * S5P6450 GPIO bank summary:
- *
- * Bank GPIOs Style SlpCon ExtInt Group
- * A 6 4Bit Yes 1
- * B 7 4Bit Yes 1
- * C 8 4Bit Yes 2
- * D 8 4Bit Yes None
- * F 2 2Bit Yes None
- * G 14 4Bit[2] Yes 5
- * H 10 4Bit[2] Yes 6
- * I 16 2Bit Yes None
- * J 12 2Bit Yes None
- * K 5 4Bit Yes None
- * N 16 2Bit No IRQ_EINT
- * P 11 2Bit Yes 8
- * Q 14 2Bit Yes None
- * R 15 4Bit[2] Yes None
- * S 8 2Bit Yes None
- *
- * [1] BANKF pins 14,15 do not form part of the external interrupt sources
- * [2] BANK has two control registers, GPxCON0 and GPxCON1
- */
-
-static struct samsung_gpio_chip s5p6450_gpios_4bit[] = {
-#ifdef CONFIG_CPU_S5P6450
- {
- .chip = {
- .base = S5P6450_GPA(0),
- .ngpio = S5P6450_GPIO_A_NR,
- .label = "GPA",
- },
- }, {
- .chip = {
- .base = S5P6450_GPB(0),
- .ngpio = S5P6450_GPIO_B_NR,
- .label = "GPB",
- },
- }, {
- .chip = {
- .base = S5P6450_GPC(0),
- .ngpio = S5P6450_GPIO_C_NR,
- .label = "GPC",
- },
- }, {
- .chip = {
- .base = S5P6450_GPD(0),
- .ngpio = S5P6450_GPIO_D_NR,
- .label = "GPD",
- },
- }, {
- .base = S5P6450_GPK_BASE,
- .chip = {
- .base = S5P6450_GPK(0),
- .ngpio = S5P6450_GPIO_K_NR,
- .label = "GPK",
- },
- },
-#endif
-};
-
-static struct samsung_gpio_chip s5p6450_gpios_4bit2[] = {
-#ifdef CONFIG_CPU_S5P6450
- {
- .base = S5P64X0_GPG_BASE + 0x4,
- .chip = {
- .base = S5P6450_GPG(0),
- .ngpio = S5P6450_GPIO_G_NR,
- .label = "GPG",
- },
- }, {
- .base = S5P64X0_GPH_BASE + 0x4,
- .chip = {
- .base = S5P6450_GPH(0),
- .ngpio = S5P6450_GPIO_H_NR,
- .label = "GPH",
- },
- },
-#endif
-};
-
-static struct samsung_gpio_chip s5p6450_gpios_rbank[] = {
-#ifdef CONFIG_CPU_S5P6450
- {
- .base = S5P64X0_GPR_BASE + 0x4,
- .config = &s5p64x0_gpio_cfg_rbank,
- .chip = {
- .base = S5P6450_GPR(0),
- .ngpio = S5P6450_GPIO_R_NR,
- .label = "GPR",
- },
- },
-#endif
-};
-
-static struct samsung_gpio_chip s5p6450_gpios_2bit[] = {
-#ifdef CONFIG_CPU_S5P6450
- {
- .base = S5P64X0_GPF_BASE,
- .config = &samsung_gpio_cfgs[6],
- .chip = {
- .base = S5P6450_GPF(0),
- .ngpio = S5P6450_GPIO_F_NR,
- .label = "GPF",
- },
- }, {
- .base = S5P64X0_GPI_BASE,
- .config = &samsung_gpio_cfgs[4],
- .chip = {
- .base = S5P6450_GPI(0),
- .ngpio = S5P6450_GPIO_I_NR,
- .label = "GPI",
- },
- }, {
- .base = S5P64X0_GPJ_BASE,
- .config = &samsung_gpio_cfgs[4],
- .chip = {
- .base = S5P6450_GPJ(0),
- .ngpio = S5P6450_GPIO_J_NR,
- .label = "GPJ",
- },
- }, {
- .base = S5P64X0_GPN_BASE,
- .config = &samsung_gpio_cfgs[5],
- .chip = {
- .base = S5P6450_GPN(0),
- .ngpio = S5P6450_GPIO_N_NR,
- .label = "GPN",
- },
- }, {
- .base = S5P64X0_GPP_BASE,
- .config = &samsung_gpio_cfgs[6],
- .chip = {
- .base = S5P6450_GPP(0),
- .ngpio = S5P6450_GPIO_P_NR,
- .label = "GPP",
- },
- }, {
- .base = S5P6450_GPQ_BASE,
- .config = &samsung_gpio_cfgs[5],
- .chip = {
- .base = S5P6450_GPQ(0),
- .ngpio = S5P6450_GPIO_Q_NR,
- .label = "GPQ",
- },
- }, {
- .base = S5P6450_GPS_BASE,
- .config = &samsung_gpio_cfgs[6],
- .chip = {
- .base = S5P6450_GPS(0),
- .ngpio = S5P6450_GPIO_S_NR,
- .label = "GPS",
- },
- },
-#endif
-};
-
-/*
- * S5PC100 GPIO bank summary:
- *
- * Bank GPIOs Style INT Type
- * A0 8 4Bit GPIO_INT0
- * A1 5 4Bit GPIO_INT1
- * B 8 4Bit GPIO_INT2
- * C 5 4Bit GPIO_INT3
- * D 7 4Bit GPIO_INT4
- * E0 8 4Bit GPIO_INT5
- * E1 6 4Bit GPIO_INT6
- * F0 8 4Bit GPIO_INT7
- * F1 8 4Bit GPIO_INT8
- * F2 8 4Bit GPIO_INT9
- * F3 4 4Bit GPIO_INT10
- * G0 8 4Bit GPIO_INT11
- * G1 3 4Bit GPIO_INT12
- * G2 7 4Bit GPIO_INT13
- * G3 7 4Bit GPIO_INT14
- * H0 8 4Bit WKUP_INT
- * H1 8 4Bit WKUP_INT
- * H2 8 4Bit WKUP_INT
- * H3 8 4Bit WKUP_INT
- * I 8 4Bit GPIO_INT15
- * J0 8 4Bit GPIO_INT16
- * J1 5 4Bit GPIO_INT17
- * J2 8 4Bit GPIO_INT18
- * J3 8 4Bit GPIO_INT19
- * J4 4 4Bit GPIO_INT20
- * K0 8 4Bit None
- * K1 6 4Bit None
- * K2 8 4Bit None
- * K3 8 4Bit None
- * L0 8 4Bit None
- * L1 8 4Bit None
- * L2 8 4Bit None
- * L3 8 4Bit None
- */
-
-static struct samsung_gpio_chip s5pc100_gpios_4bit[] = {
-#ifdef CONFIG_CPU_S5PC100
- {
- .chip = {
- .base = S5PC100_GPA0(0),
- .ngpio = S5PC100_GPIO_A0_NR,
- .label = "GPA0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPA1(0),
- .ngpio = S5PC100_GPIO_A1_NR,
- .label = "GPA1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPB(0),
- .ngpio = S5PC100_GPIO_B_NR,
- .label = "GPB",
- },
- }, {
- .chip = {
- .base = S5PC100_GPC(0),
- .ngpio = S5PC100_GPIO_C_NR,
- .label = "GPC",
- },
- }, {
- .chip = {
- .base = S5PC100_GPD(0),
- .ngpio = S5PC100_GPIO_D_NR,
- .label = "GPD",
- },
- }, {
- .chip = {
- .base = S5PC100_GPE0(0),
- .ngpio = S5PC100_GPIO_E0_NR,
- .label = "GPE0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPE1(0),
- .ngpio = S5PC100_GPIO_E1_NR,
- .label = "GPE1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPF0(0),
- .ngpio = S5PC100_GPIO_F0_NR,
- .label = "GPF0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPF1(0),
- .ngpio = S5PC100_GPIO_F1_NR,
- .label = "GPF1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPF2(0),
- .ngpio = S5PC100_GPIO_F2_NR,
- .label = "GPF2",
- },
- }, {
- .chip = {
- .base = S5PC100_GPF3(0),
- .ngpio = S5PC100_GPIO_F3_NR,
- .label = "GPF3",
- },
- }, {
- .chip = {
- .base = S5PC100_GPG0(0),
- .ngpio = S5PC100_GPIO_G0_NR,
- .label = "GPG0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPG1(0),
- .ngpio = S5PC100_GPIO_G1_NR,
- .label = "GPG1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPG2(0),
- .ngpio = S5PC100_GPIO_G2_NR,
- .label = "GPG2",
- },
- }, {
- .chip = {
- .base = S5PC100_GPG3(0),
- .ngpio = S5PC100_GPIO_G3_NR,
- .label = "GPG3",
- },
- }, {
- .chip = {
- .base = S5PC100_GPI(0),
- .ngpio = S5PC100_GPIO_I_NR,
- .label = "GPI",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ0(0),
- .ngpio = S5PC100_GPIO_J0_NR,
- .label = "GPJ0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ1(0),
- .ngpio = S5PC100_GPIO_J1_NR,
- .label = "GPJ1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ2(0),
- .ngpio = S5PC100_GPIO_J2_NR,
- .label = "GPJ2",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ3(0),
- .ngpio = S5PC100_GPIO_J3_NR,
- .label = "GPJ3",
- },
- }, {
- .chip = {
- .base = S5PC100_GPJ4(0),
- .ngpio = S5PC100_GPIO_J4_NR,
- .label = "GPJ4",
- },
- }, {
- .chip = {
- .base = S5PC100_GPK0(0),
- .ngpio = S5PC100_GPIO_K0_NR,
- .label = "GPK0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPK1(0),
- .ngpio = S5PC100_GPIO_K1_NR,
- .label = "GPK1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPK2(0),
- .ngpio = S5PC100_GPIO_K2_NR,
- .label = "GPK2",
- },
- }, {
- .chip = {
- .base = S5PC100_GPK3(0),
- .ngpio = S5PC100_GPIO_K3_NR,
- .label = "GPK3",
- },
- }, {
- .chip = {
- .base = S5PC100_GPL0(0),
- .ngpio = S5PC100_GPIO_L0_NR,
- .label = "GPL0",
- },
- }, {
- .chip = {
- .base = S5PC100_GPL1(0),
- .ngpio = S5PC100_GPIO_L1_NR,
- .label = "GPL1",
- },
- }, {
- .chip = {
- .base = S5PC100_GPL2(0),
- .ngpio = S5PC100_GPIO_L2_NR,
- .label = "GPL2",
- },
- }, {
- .chip = {
- .base = S5PC100_GPL3(0),
- .ngpio = S5PC100_GPIO_L3_NR,
- .label = "GPL3",
- },
- }, {
- .chip = {
- .base = S5PC100_GPL4(0),
- .ngpio = S5PC100_GPIO_L4_NR,
- .label = "GPL4",
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC00),
- .irq_base = IRQ_EINT(0),
- .chip = {
- .base = S5PC100_GPH0(0),
- .ngpio = S5PC100_GPIO_H0_NR,
- .label = "GPH0",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC20),
- .irq_base = IRQ_EINT(8),
- .chip = {
- .base = S5PC100_GPH1(0),
- .ngpio = S5PC100_GPIO_H1_NR,
- .label = "GPH1",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC40),
- .irq_base = IRQ_EINT(16),
- .chip = {
- .base = S5PC100_GPH2(0),
- .ngpio = S5PC100_GPIO_H2_NR,
- .label = "GPH2",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC60),
- .irq_base = IRQ_EINT(24),
- .chip = {
- .base = S5PC100_GPH3(0),
- .ngpio = S5PC100_GPIO_H3_NR,
- .label = "GPH3",
- .to_irq = samsung_gpiolib_to_irq,
- },
- },
-#endif
-};
-
-/*
- * Followings are the gpio banks in S5PV210/S5PC110
- *
- * The 'config' member when left to NULL, is initialized to the default
- * structure samsung_gpio_cfgs[3] in the init function below.
- *
- * The 'base' member is also initialized in the init function below.
- * Note: The initialization of 'base' member of samsung_gpio_chip structure
- * uses the above macro and depends on the banks being listed in order here.
- */
-
-static struct samsung_gpio_chip s5pv210_gpios_4bit[] = {
-#ifdef CONFIG_CPU_S5PV210
- {
- .chip = {
- .base = S5PV210_GPA0(0),
- .ngpio = S5PV210_GPIO_A0_NR,
- .label = "GPA0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPA1(0),
- .ngpio = S5PV210_GPIO_A1_NR,
- .label = "GPA1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPB(0),
- .ngpio = S5PV210_GPIO_B_NR,
- .label = "GPB",
- },
- }, {
- .chip = {
- .base = S5PV210_GPC0(0),
- .ngpio = S5PV210_GPIO_C0_NR,
- .label = "GPC0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPC1(0),
- .ngpio = S5PV210_GPIO_C1_NR,
- .label = "GPC1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPD0(0),
- .ngpio = S5PV210_GPIO_D0_NR,
- .label = "GPD0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPD1(0),
- .ngpio = S5PV210_GPIO_D1_NR,
- .label = "GPD1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPE0(0),
- .ngpio = S5PV210_GPIO_E0_NR,
- .label = "GPE0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPE1(0),
- .ngpio = S5PV210_GPIO_E1_NR,
- .label = "GPE1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPF0(0),
- .ngpio = S5PV210_GPIO_F0_NR,
- .label = "GPF0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPF1(0),
- .ngpio = S5PV210_GPIO_F1_NR,
- .label = "GPF1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPF2(0),
- .ngpio = S5PV210_GPIO_F2_NR,
- .label = "GPF2",
- },
- }, {
- .chip = {
- .base = S5PV210_GPF3(0),
- .ngpio = S5PV210_GPIO_F3_NR,
- .label = "GPF3",
- },
- }, {
- .chip = {
- .base = S5PV210_GPG0(0),
- .ngpio = S5PV210_GPIO_G0_NR,
- .label = "GPG0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPG1(0),
- .ngpio = S5PV210_GPIO_G1_NR,
- .label = "GPG1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPG2(0),
- .ngpio = S5PV210_GPIO_G2_NR,
- .label = "GPG2",
- },
- }, {
- .chip = {
- .base = S5PV210_GPG3(0),
- .ngpio = S5PV210_GPIO_G3_NR,
- .label = "GPG3",
- },
- }, {
- .chip = {
- .base = S5PV210_GPI(0),
- .ngpio = S5PV210_GPIO_I_NR,
- .label = "GPI",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ0(0),
- .ngpio = S5PV210_GPIO_J0_NR,
- .label = "GPJ0",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ1(0),
- .ngpio = S5PV210_GPIO_J1_NR,
- .label = "GPJ1",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ2(0),
- .ngpio = S5PV210_GPIO_J2_NR,
- .label = "GPJ2",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ3(0),
- .ngpio = S5PV210_GPIO_J3_NR,
- .label = "GPJ3",
- },
- }, {
- .chip = {
- .base = S5PV210_GPJ4(0),
- .ngpio = S5PV210_GPIO_J4_NR,
- .label = "GPJ4",
- },
- }, {
- .chip = {
- .base = S5PV210_MP01(0),
- .ngpio = S5PV210_GPIO_MP01_NR,
- .label = "MP01",
- },
- }, {
- .chip = {
- .base = S5PV210_MP02(0),
- .ngpio = S5PV210_GPIO_MP02_NR,
- .label = "MP02",
- },
- }, {
- .chip = {
- .base = S5PV210_MP03(0),
- .ngpio = S5PV210_GPIO_MP03_NR,
- .label = "MP03",
- },
- }, {
- .chip = {
- .base = S5PV210_MP04(0),
- .ngpio = S5PV210_GPIO_MP04_NR,
- .label = "MP04",
- },
- }, {
- .chip = {
- .base = S5PV210_MP05(0),
- .ngpio = S5PV210_GPIO_MP05_NR,
- .label = "MP05",
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC00),
- .irq_base = IRQ_EINT(0),
- .chip = {
- .base = S5PV210_GPH0(0),
- .ngpio = S5PV210_GPIO_H0_NR,
- .label = "GPH0",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC20),
- .irq_base = IRQ_EINT(8),
- .chip = {
- .base = S5PV210_GPH1(0),
- .ngpio = S5PV210_GPIO_H1_NR,
- .label = "GPH1",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC40),
- .irq_base = IRQ_EINT(16),
- .chip = {
- .base = S5PV210_GPH2(0),
- .ngpio = S5PV210_GPIO_H2_NR,
- .label = "GPH2",
- .to_irq = samsung_gpiolib_to_irq,
- },
- }, {
- .base = (S5P_VA_GPIO + 0xC60),
- .irq_base = IRQ_EINT(24),
- .chip = {
- .base = S5PV210_GPH3(0),
- .ngpio = S5PV210_GPIO_H3_NR,
- .label = "GPH3",
- .to_irq = samsung_gpiolib_to_irq,
- },
- },
-#endif
-};
-
/* TODO: cleanup soc_is_* */
static __init int samsung_gpiolib_init(void)
{
- struct samsung_gpio_chip *chip;
- int i, nr_chips;
- int group = 0;
-
/*
* Currently there are two drivers that can provide GPIO support for
* Samsung SoCs. For device tree enabled platforms, the new
@@ -2109,54 +1192,6 @@ static __init int samsung_gpiolib_init(void)
S3C64XX_VA_GPIO);
samsung_gpiolib_add_4bit2_chips(s3c64xx_gpios_4bit2,
ARRAY_SIZE(s3c64xx_gpios_4bit2));
- } else if (soc_is_s5p6440()) {
- samsung_gpiolib_add_2bit_chips(s5p6440_gpios_2bit,
- ARRAY_SIZE(s5p6440_gpios_2bit), NULL, 0x0);
- samsung_gpiolib_add_4bit_chips(s5p6440_gpios_4bit,
- ARRAY_SIZE(s5p6440_gpios_4bit), S5P_VA_GPIO);
- samsung_gpiolib_add_4bit2_chips(s5p6440_gpios_4bit2,
- ARRAY_SIZE(s5p6440_gpios_4bit2));
- s5p64x0_gpiolib_add_rbank(s5p6440_gpios_rbank,
- ARRAY_SIZE(s5p6440_gpios_rbank));
- } else if (soc_is_s5p6450()) {
- samsung_gpiolib_add_2bit_chips(s5p6450_gpios_2bit,
- ARRAY_SIZE(s5p6450_gpios_2bit), NULL, 0x0);
- samsung_gpiolib_add_4bit_chips(s5p6450_gpios_4bit,
- ARRAY_SIZE(s5p6450_gpios_4bit), S5P_VA_GPIO);
- samsung_gpiolib_add_4bit2_chips(s5p6450_gpios_4bit2,
- ARRAY_SIZE(s5p6450_gpios_4bit2));
- s5p64x0_gpiolib_add_rbank(s5p6450_gpios_rbank,
- ARRAY_SIZE(s5p6450_gpios_rbank));
- } else if (soc_is_s5pc100()) {
- group = 0;
- chip = s5pc100_gpios_4bit;
- nr_chips = ARRAY_SIZE(s5pc100_gpios_4bit);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &samsung_gpio_cfgs[3];
- chip->group = group++;
- }
- }
- samsung_gpiolib_add_4bit_chips(s5pc100_gpios_4bit, nr_chips, S5P_VA_GPIO);
-#if defined(CONFIG_CPU_S5PC100) && defined(CONFIG_S5P_GPIO_INT)
- s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
-#endif
- } else if (soc_is_s5pv210()) {
- group = 0;
- chip = s5pv210_gpios_4bit;
- nr_chips = ARRAY_SIZE(s5pv210_gpios_4bit);
-
- for (i = 0; i < nr_chips; i++, chip++) {
- if (!chip->config) {
- chip->config = &samsung_gpio_cfgs[3];
- chip->group = group++;
- }
- }
- samsung_gpiolib_add_4bit_chips(s5pv210_gpios_4bit, nr_chips, S5P_VA_GPIO);
-#if defined(CONFIG_CPU_S5PV210) && defined(CONFIG_S5P_GPIO_INT)
- s5p_register_gpioint_bank(IRQ_GPIOINT, 0, S5P_GPIOINT_GROUP_MAXNR);
-#endif
} else {
WARN(1, "Unknown SoC in gpio-samsung, no GPIOs added\n");
return -ENODEV;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index a9b1cd16c848..41e91d70301e 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -290,8 +290,7 @@ static int sch_gpio_probe(struct platform_device *pdev)
return 0;
err_sch_gpio_resume:
- if (gpiochip_remove(&sch_gpio_core))
- dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
+ gpiochip_remove(&sch_gpio_core);
err_sch_gpio_core:
release_region(res->start, resource_size(res));
@@ -304,23 +303,14 @@ static int sch_gpio_remove(struct platform_device *pdev)
{
struct resource *res;
if (gpio_ba) {
- int err;
- err = gpiochip_remove(&sch_gpio_core);
- if (err)
- dev_err(&pdev->dev, "%s failed, %d\n",
- "gpiochip_remove()", err);
- err = gpiochip_remove(&sch_gpio_resume);
- if (err)
- dev_err(&pdev->dev, "%s failed, %d\n",
- "gpiochip_remove()", err);
+ gpiochip_remove(&sch_gpio_core);
+ gpiochip_remove(&sch_gpio_resume);
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
release_region(res->start, resource_size(res));
gpio_ba = 0;
-
- return err;
}
return 0;
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index f942b80ee403..0cb11413e814 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -291,14 +291,12 @@ static int sch311x_gpio_remove(struct platform_device *pdev)
{
struct sch311x_pdev_data *pdata = pdev->dev.platform_data;
struct sch311x_gpio_priv *priv = platform_get_drvdata(pdev);
- int err, i;
+ int i;
release_region(pdata->runtime_reg + GP1, 6);
for (i = 0; i < ARRAY_SIZE(priv->blocks); i++) {
- err = gpiochip_remove(&priv->blocks[i].chip);
- if (err)
- return err;
+ gpiochip_remove(&priv->blocks[i].chip);
dev_info(&pdev->dev,
"SMSC SCH311x GPIO block %d unregistered.\n", i);
}
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 7c6c518929bc..d8da36cd8123 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -265,9 +265,7 @@ static void sdv_gpio_remove(struct pci_dev *pdev)
free_irq(pdev->irq, sd);
irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS);
- if (gpiochip_remove(&sd->bgpio.gc))
- dev_err(&pdev->dev, "gpiochip_remove() failed.\n");
-
+ gpiochip_remove(&sd->bgpio.gc);
pci_release_region(pdev, GPIO_BAR);
iounmap(sd->gpio_pub_base);
pci_disable_device(pdev);
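
The gpio-stmpe changes that follow drop the driver's private irq_domain in favour of the gpiolib irqchip infrastructure; once gpiochip_irqchip_add() owns the mappings, the irq chip data is the struct gpio_chip itself, and each reworked callback below recovers the driver state with container_of(). A minimal sketch of that retrieval pattern, again with hypothetical names:

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>

struct foo_gpio {
	struct gpio_chip chip;
	u8 mask_cache;			/* illustrative per-chip state */
};

#define to_foo_gpio(c)	container_of(c, struct foo_gpio, chip)

static void foo_gpio_irq_mask(struct irq_data *d)
{
	/* chip data installed by gpiochip_irqchip_add() is the gpio_chip */
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct foo_gpio *foo = to_foo_gpio(gc);

	/* d->hwirq is the pin offset within this chip */
	foo->mask_cache |= BIT(d->hwirq);
}
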
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 628b58494294..845025a57240 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -10,8 +10,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/mfd/stmpe.h>
@@ -31,9 +29,7 @@ struct stmpe_gpio {
struct stmpe *stmpe;
struct device *dev;
struct mutex irq_lock;
- struct irq_domain *domain;
unsigned norequest_mask;
-
/* Caches of interrupt control registers for bus_lock */
u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS];
u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS];
@@ -101,13 +97,6 @@ static int stmpe_gpio_direction_input(struct gpio_chip *chip,
return stmpe_set_bits(stmpe, reg, mask, 0);
}
-static int stmpe_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
-
- return irq_create_mapping(stmpe_gpio->domain, offset);
-}
-
static int stmpe_gpio_request(struct gpio_chip *chip, unsigned offset)
{
struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(chip);
@@ -126,14 +115,14 @@ static struct gpio_chip template_chip = {
.get = stmpe_gpio_get,
.direction_output = stmpe_gpio_direction_output,
.set = stmpe_gpio_set,
- .to_irq = stmpe_gpio_to_irq,
.request = stmpe_gpio_request,
.can_sleep = true,
};
static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
- struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -160,14 +149,16 @@ static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
static void stmpe_gpio_irq_lock(struct irq_data *d)
{
- struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(gc);
mutex_lock(&stmpe_gpio->irq_lock);
}
static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
{
- struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(gc);
struct stmpe *stmpe = stmpe_gpio->stmpe;
int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
static const u8 regmap[] = {
@@ -200,7 +191,8 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
static void stmpe_gpio_irq_mask(struct irq_data *d)
{
- struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -210,7 +202,8 @@ static void stmpe_gpio_irq_mask(struct irq_data *d)
static void stmpe_gpio_irq_unmask(struct irq_data *d)
{
- struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct stmpe_gpio *stmpe_gpio = to_stmpe_gpio(gc);
int offset = d->hwirq;
int regoffset = offset / 8;
int mask = 1 << (offset % 8);
@@ -253,7 +246,7 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
while (stat) {
int bit = __ffs(stat);
int line = bank * 8 + bit;
- int child_irq = irq_find_mapping(stmpe_gpio->domain,
+ int child_irq = irq_find_mapping(stmpe_gpio->chip.irqdomain,
line);
handle_nested_irq(child_irq);
@@ -271,56 +264,6 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
return IRQ_HANDLED;
}
-static int stmpe_gpio_irq_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct stmpe_gpio *stmpe_gpio = d->host_data;
-
- if (!stmpe_gpio)
- return -EINVAL;
-
- irq_set_chip_data(irq, stmpe_gpio);
- irq_set_chip_and_handler(irq, &stmpe_gpio_irq_chip,
- handle_simple_irq);
- irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
-
- return 0;
-}
-
-static void stmpe_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
-{
-#ifdef CONFIG_ARM
- set_irq_flags(irq, 0);
-#endif
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_chip_data(irq, NULL);
-}
-
-static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {
- .unmap = stmpe_gpio_irq_unmap,
- .map = stmpe_gpio_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio,
- struct device_node *np)
-{
- stmpe_gpio->domain = irq_domain_add_simple(np,
- stmpe_gpio->chip.ngpio, 0,
- &stmpe_gpio_irq_simple_ops, stmpe_gpio);
- if (!stmpe_gpio->domain) {
- dev_err(stmpe_gpio->dev, "failed to create irqdomain\n");
- return -ENOSYS;
- }
-
- return 0;
-}
-
static int stmpe_gpio_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
@@ -358,30 +301,37 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
if (irq < 0)
dev_info(&pdev->dev,
- "device configured in no-irq mode; "
+ "device configured in no-irq mode: "
"irqs are not available\n");
ret = stmpe_enable(stmpe, STMPE_BLOCK_GPIO);
if (ret)
goto out_free;
- if (irq >= 0) {
- ret = stmpe_gpio_irq_init(stmpe_gpio, np);
- if (ret)
- goto out_disable;
-
- ret = request_threaded_irq(irq, NULL, stmpe_gpio_irq,
- IRQF_ONESHOT, "stmpe-gpio", stmpe_gpio);
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ stmpe_gpio_irq, IRQF_ONESHOT,
+ "stmpe-gpio", stmpe_gpio);
if (ret) {
dev_err(&pdev->dev, "unable to get irq: %d\n", ret);
goto out_disable;
}
+ ret = gpiochip_irqchip_add(&stmpe_gpio->chip,
+ &stmpe_gpio_irq_chip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "could not connect irqchip to gpiochip\n");
+ return ret;
+ }
}
ret = gpiochip_add(&stmpe_gpio->chip);
if (ret) {
dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret);
- goto out_freeirq;
+ goto out_disable;
}
if (pdata && pdata->setup)
@@ -391,9 +341,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
return 0;
-out_freeirq:
- if (irq >= 0)
- free_irq(irq, stmpe_gpio);
out_disable:
stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
out_free:
@@ -406,24 +353,14 @@ static int stmpe_gpio_remove(struct platform_device *pdev)
struct stmpe_gpio *stmpe_gpio = platform_get_drvdata(pdev);
struct stmpe *stmpe = stmpe_gpio->stmpe;
struct stmpe_gpio_platform_data *pdata = stmpe->pdata->gpio;
- int irq = platform_get_irq(pdev, 0);
- int ret;
if (pdata && pdata->remove)
pdata->remove(stmpe, stmpe_gpio->chip.base);
- ret = gpiochip_remove(&stmpe_gpio->chip);
- if (ret < 0) {
- dev_err(stmpe_gpio->dev,
- "unable to remove gpiochip: %d\n", ret);
- return ret;
- }
+ gpiochip_remove(&stmpe_gpio->chip);
stmpe_disable(stmpe, STMPE_BLOCK_GPIO);
- if (irq >= 0)
- free_irq(irq, stmpe_gpio);
-
kfree(stmpe_gpio);
return 0;
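A minimal sketch of the pattern the stmpe conversion above follows (the foo_ names are hypothetical, not part of the patch): the gpiolib irqchip helpers register the irq domain and the default .to_irq translation, and each irq chip callback gets the gpio_chip back from the irq_data and recovers the driver state with container_of().

#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

struct foo_gpio {
	struct gpio_chip chip;
	u8 mask_cache;
};

#define to_foo_gpio(c) container_of(c, struct foo_gpio, chip)

static void foo_gpio_irq_mask(struct irq_data *d)
{
	/* chip data is now the gpio_chip itself, not the private struct */
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct foo_gpio *foo = to_foo_gpio(gc);

	/* d->hwirq is the GPIO offset within the chip */
	foo->mask_cache |= BIT(d->hwirq);
}

static struct irq_chip foo_gpio_irq_chip = {
	.name     = "foo-gpio",
	.irq_mask = foo_gpio_irq_mask,
};

static irqreturn_t foo_gpio_irq(int irq, void *dev)
{
	struct foo_gpio *foo = dev;
	int line = 0;	/* would come from the hardware status register */

	/* offsets map to Linux irqs through the chip's own irqdomain now */
	handle_nested_irq(irq_find_mapping(foo->chip.irqdomain, line));
	return IRQ_HANDLED;
}

static int foo_gpio_add_irqchip(struct foo_gpio *foo)
{
	/* registers the irqdomain and a default .to_irq for the chip */
	return gpiochip_irqchip_add(&foo->chip, &foo_gpio_irq_chip, 0,
				    handle_simple_irq, IRQ_TYPE_NONE);
}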
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index b51ca9f5c140..bce6c6108f20 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -615,19 +615,16 @@ static int sx150x_probe(struct i2c_client *client,
return 0;
probe_fail_post_gpiochip_add:
- WARN_ON(gpiochip_remove(&chip->gpio_chip) < 0);
+ gpiochip_remove(&chip->gpio_chip);
return rc;
}
static int sx150x_remove(struct i2c_client *client)
{
struct sx150x_chip *chip;
- int rc;
chip = i2c_get_clientdata(client);
- rc = gpiochip_remove(&chip->gpio_chip);
- if (rc < 0)
- return rc;
+ gpiochip_remove(&chip->gpio_chip);
if (chip->irq_summary >= 0)
sx150x_remove_irq_chip(chip);
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index b50fe1297748..30884fbc750d 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -172,7 +172,8 @@ static int syscon_gpio_remove(struct platform_device *pdev)
{
struct syscon_gpio_priv *priv = platform_get_drvdata(pdev);
- return gpiochip_remove(&priv->chip);
+ gpiochip_remove(&priv->chip);
+ return 0;
}
static struct platform_driver syscon_gpio_driver = {
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 07bce97647a6..9e615be8032c 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -291,7 +291,6 @@ fail_ioremap:
static int __exit tb10x_gpio_remove(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio = platform_get_drvdata(pdev);
- int ret;
if (tb10x_gpio->gc.to_irq) {
irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0],
@@ -300,9 +299,7 @@ static int __exit tb10x_gpio_remove(struct platform_device *pdev)
irq_domain_remove(tb10x_gpio->domain);
free_irq(tb10x_gpio->irq, tb10x_gpio);
}
- ret = gpiochip_remove(&tb10x_gpio->gc);
- if (ret)
- return ret;
+ gpiochip_remove(&tb10x_gpio->gc);
return 0;
}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index 51f7cbd9ff71..7324869c38e0 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -313,17 +313,11 @@ static int tc3589x_gpio_remove(struct platform_device *pdev)
struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
struct tc3589x_gpio_platform_data *pdata = tc3589x->pdata->gpio;
- int ret;
if (pdata && pdata->remove)
pdata->remove(tc3589x, tc3589x_gpio->chip.base);
- ret = gpiochip_remove(&tc3589x_gpio->chip);
- if (ret < 0) {
- dev_err(tc3589x_gpio->dev,
- "unable to remove gpiochip: %d\n", ret);
- return ret;
- }
+ gpiochip_remove(&tc3589x_gpio->chip);
return 0;
}
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index efc7c129016d..a685a3cbbc81 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -307,7 +307,6 @@ static int timbgpio_probe(struct platform_device *pdev)
static int timbgpio_remove(struct platform_device *pdev)
{
- int err;
struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct timbgpio *tgpio = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -323,9 +322,7 @@ static int timbgpio_remove(struct platform_device *pdev)
irq_set_handler_data(irq, NULL);
}
- err = gpiochip_remove(&tgpio->gpio);
- if (err)
- printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n");
+ gpiochip_remove(&tgpio->gpio);
return 0;
}
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index a69fbea41253..9c9238e838a9 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -137,7 +137,8 @@ static int tps6586x_gpio_remove(struct platform_device *pdev)
{
struct tps6586x_gpio *tps6586x_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&tps6586x_gpio->gpio_chip);
+ gpiochip_remove(&tps6586x_gpio->gpio_chip);
+ return 0;
}
static struct platform_driver tps6586x_gpio_driver = {
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index e2f8cda235ea..88f1f5ff4e96 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -190,7 +190,8 @@ static int tps65910_gpio_remove(struct platform_device *pdev)
{
struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&tps65910_gpio->gpio_chip);
+ gpiochip_remove(&tps65910_gpio->gpio_chip);
+ return 0;
}
static struct platform_driver tps65910_gpio_driver = {
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 59ee486cb8b9..22052d84c63b 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -117,7 +117,8 @@ static int tps65912_gpio_remove(struct platform_device *pdev)
{
struct tps65912_gpio_data *tps65912_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&tps65912_gpio->gpio_chip);
+ gpiochip_remove(&tps65912_gpio->gpio_chip);
+ return 0;
}
static struct platform_driver tps65912_gpio_driver = {
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index 3df3ebdb3e52..de18591ff11e 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -427,8 +427,7 @@ static int ts5500_dio_probe(struct platform_device *pdev)
return 0;
cleanup:
- if (gpiochip_remove(&priv->gpio_chip))
- dev_err(dev, "failed to remove gpio chip\n");
+ gpiochip_remove(&priv->gpio_chip);
return ret;
}
@@ -437,7 +436,8 @@ static int ts5500_dio_remove(struct platform_device *pdev)
struct ts5500_priv *priv = platform_get_drvdata(pdev);
ts5500_disable_irq(priv);
- return gpiochip_remove(&priv->gpio_chip);
+ gpiochip_remove(&priv->gpio_chip);
+ return 0;
}
static struct platform_device_id ts5500_dio_ids[] = {
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 3ebb1a5ff22e..118828b3736f 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -554,7 +554,7 @@ no_irqs:
platform_set_drvdata(pdev, priv);
- if (pdata && pdata->setup) {
+ if (pdata->setup) {
int status;
status = pdata->setup(&pdev->dev, priv->gpio_chip.base,
@@ -583,9 +583,7 @@ static int gpio_twl4030_remove(struct platform_device *pdev)
}
}
- status = gpiochip_remove(&priv->gpio_chip);
- if (status < 0)
- return status;
+ gpiochip_remove(&priv->gpio_chip);
if (is_module())
return 0;
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index 0caf5cd1b47d..f28e04b88aa9 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -111,7 +111,8 @@ static int gpo_twl6040_probe(struct platform_device *pdev)
static int gpo_twl6040_remove(struct platform_device *pdev)
{
- return gpiochip_remove(&twl6040gpo_chip);
+ gpiochip_remove(&twl6040gpo_chip);
+ return 0;
}
/* Note: this hardware lives inside an I2C-based multi-function device. */
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 2445fe771179..d502825159b9 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -70,7 +70,7 @@ static int ucb1400_gpio_probe(struct platform_device *dev)
if (err)
goto err;
- if (ucb && ucb->gpio_setup)
+ if (ucb->gpio_setup)
err = ucb->gpio_setup(&dev->dev, ucb->gc.ngpio);
err:
@@ -89,7 +89,7 @@ static int ucb1400_gpio_remove(struct platform_device *dev)
return err;
}
- err = gpiochip_remove(&ucb->gc);
+ gpiochip_remove(&ucb->gc);
return err;
}
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index 79e3b5836712..e2a11f27807f 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -446,8 +446,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
return ret;
err_gpiob:
- if (gpiochip_remove(&vb_gpio->gpioa))
- dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
+ gpiochip_remove(&vb_gpio->gpioa);
err_gpioa:
return ret;
@@ -456,13 +455,10 @@ err_gpioa:
static int vprbrd_gpio_remove(struct platform_device *pdev)
{
struct vprbrd_gpio *vb_gpio = platform_get_drvdata(pdev);
- int ret;
- ret = gpiochip_remove(&vb_gpio->gpiob);
- if (ret == 0)
- ret = gpiochip_remove(&vb_gpio->gpioa);
+ gpiochip_remove(&vb_gpio->gpiob);
- return ret;
+ return 0;
}
static struct platform_driver vprbrd_gpio_driver = {
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 66cbcc108e62..dbf28fa03f67 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -515,7 +515,7 @@ static int giu_probe(struct platform_device *pdev)
struct resource *res;
unsigned int trigger, i, pin;
struct irq_chip *chip;
- int irq, retval;
+ int irq, ret;
switch (pdev->id) {
case GPIO_50PINS_PULLUPDOWN:
@@ -544,7 +544,11 @@ static int giu_probe(struct platform_device *pdev)
vr41xx_gpio_chip.dev = &pdev->dev;
- retval = gpiochip_add(&vr41xx_gpio_chip);
+ ret = gpiochip_add(&vr41xx_gpio_chip);
+ if (ret) {
+ iounmap(giu_base);
+ return -ENODEV;
+ }
giu_write(GIUINTENL, 0);
giu_write(GIUINTENH, 0);
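Since the vr41xx hunk above adds error handling around gpiochip_add(), here is a minimal sketch of that pattern (the foo_ name is hypothetical): a non-zero return means the chip was not registered, so the mapping is released and the error propagated.

#include <linux/gpio/driver.h>
#include <linux/io.h>

static int foo_register_chip(struct gpio_chip *chip, void __iomem *base)
{
	int ret;

	ret = gpiochip_add(chip);
	if (ret) {		/* non-zero return means registration failed */
		iounmap(base);
		return ret;
	}

	return 0;
}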
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 0fd23b6a753d..85971d4e23c1 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -288,8 +288,7 @@ static int vx855gpio_remove(struct platform_device *pdev)
struct vx855_gpio *vg = platform_get_drvdata(pdev);
struct resource *res;
- if (gpiochip_remove(&vg->gpio))
- dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
+ gpiochip_remove(&vg->gpio);
if (vg->gpi_reserved) {
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index b18a1a26425e..58ce75c188b7 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -279,7 +279,8 @@ static int wm831x_gpio_remove(struct platform_device *pdev)
{
struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&wm831x_gpio->gpio_chip);
+ gpiochip_remove(&wm831x_gpio->gpio_chip);
+ return 0;
}
static struct platform_driver wm831x_gpio_driver = {
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 2487f9d575d3..060b89303bb6 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -145,7 +145,8 @@ static int wm8350_gpio_remove(struct platform_device *pdev)
{
struct wm8350_gpio_data *wm8350_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&wm8350_gpio->gpio_chip);
+ gpiochip_remove(&wm8350_gpio->gpio_chip);
+ return 0;
}
static struct platform_driver wm8350_gpio_driver = {
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index d93b6b581677..6f5e42db4b9e 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -285,7 +285,8 @@ static int wm8994_gpio_remove(struct platform_device *pdev)
{
struct wm8994_gpio *wm8994_gpio = platform_get_drvdata(pdev);
- return gpiochip_remove(&wm8994_gpio->gpio_chip);
+ gpiochip_remove(&wm8994_gpio->gpio_chip);
+ return 0;
}
static struct platform_driver wm8994_gpio_driver = {
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
new file mode 100644
index 000000000000..c3145f91fda3
--- /dev/null
+++ b/drivers/gpio/gpio-zynq.c
@@ -0,0 +1,692 @@
+/*
+ * Xilinx Zynq GPIO device driver
+ *
+ * Copyright (C) 2009 - 2014 Xilinx, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#define DRIVER_NAME "zynq-gpio"
+
+/* Maximum banks */
+#define ZYNQ_GPIO_MAX_BANK 4
+
+#define ZYNQ_GPIO_BANK0_NGPIO 32
+#define ZYNQ_GPIO_BANK1_NGPIO 22
+#define ZYNQ_GPIO_BANK2_NGPIO 32
+#define ZYNQ_GPIO_BANK3_NGPIO 32
+
+#define ZYNQ_GPIO_NR_GPIOS (ZYNQ_GPIO_BANK0_NGPIO + \
+ ZYNQ_GPIO_BANK1_NGPIO + \
+ ZYNQ_GPIO_BANK2_NGPIO + \
+ ZYNQ_GPIO_BANK3_NGPIO)
+
+#define ZYNQ_GPIO_BANK0_PIN_MIN 0
+#define ZYNQ_GPIO_BANK0_PIN_MAX (ZYNQ_GPIO_BANK0_PIN_MIN + \
+ ZYNQ_GPIO_BANK0_NGPIO - 1)
+#define ZYNQ_GPIO_BANK1_PIN_MIN (ZYNQ_GPIO_BANK0_PIN_MAX + 1)
+#define ZYNQ_GPIO_BANK1_PIN_MAX (ZYNQ_GPIO_BANK1_PIN_MIN + \
+ ZYNQ_GPIO_BANK1_NGPIO - 1)
+#define ZYNQ_GPIO_BANK2_PIN_MIN (ZYNQ_GPIO_BANK1_PIN_MAX + 1)
+#define ZYNQ_GPIO_BANK2_PIN_MAX (ZYNQ_GPIO_BANK2_PIN_MIN + \
+ ZYNQ_GPIO_BANK2_NGPIO - 1)
+#define ZYNQ_GPIO_BANK3_PIN_MIN (ZYNQ_GPIO_BANK2_PIN_MAX + 1)
+#define ZYNQ_GPIO_BANK3_PIN_MAX (ZYNQ_GPIO_BANK3_PIN_MIN + \
+ ZYNQ_GPIO_BANK3_NGPIO - 1)
+
+
+/* Register offsets for the GPIO device */
+/* LSW Mask & Data -WO */
+#define ZYNQ_GPIO_DATA_LSW_OFFSET(BANK) (0x000 + (8 * BANK))
+/* MSW Mask & Data -WO */
+#define ZYNQ_GPIO_DATA_MSW_OFFSET(BANK) (0x004 + (8 * BANK))
+/* Data Register-RW */
+#define ZYNQ_GPIO_DATA_RO_OFFSET(BANK) (0x060 + (4 * BANK))
+/* Direction mode reg-RW */
+#define ZYNQ_GPIO_DIRM_OFFSET(BANK) (0x204 + (0x40 * BANK))
+/* Output enable reg-RW */
+#define ZYNQ_GPIO_OUTEN_OFFSET(BANK) (0x208 + (0x40 * BANK))
+/* Interrupt mask reg-RO */
+#define ZYNQ_GPIO_INTMASK_OFFSET(BANK) (0x20C + (0x40 * BANK))
+/* Interrupt enable reg-WO */
+#define ZYNQ_GPIO_INTEN_OFFSET(BANK) (0x210 + (0x40 * BANK))
+/* Interrupt disable reg-WO */
+#define ZYNQ_GPIO_INTDIS_OFFSET(BANK) (0x214 + (0x40 * BANK))
+/* Interrupt status reg-RO */
+#define ZYNQ_GPIO_INTSTS_OFFSET(BANK) (0x218 + (0x40 * BANK))
+/* Interrupt type reg-RW */
+#define ZYNQ_GPIO_INTTYPE_OFFSET(BANK) (0x21C + (0x40 * BANK))
+/* Interrupt polarity reg-RW */
+#define ZYNQ_GPIO_INTPOL_OFFSET(BANK) (0x220 + (0x40 * BANK))
+/* Interrupt on any, reg-RW */
+#define ZYNQ_GPIO_INTANY_OFFSET(BANK) (0x224 + (0x40 * BANK))
+
+/* Disable all interrupts mask */
+#define ZYNQ_GPIO_IXR_DISABLE_ALL 0xFFFFFFFF
+
+/* Mid pin number of a bank */
+#define ZYNQ_GPIO_MID_PIN_NUM 16
+
+/* GPIO upper 16 bit mask */
+#define ZYNQ_GPIO_UPPER_MASK 0xFFFF0000
+
+/**
+ * struct zynq_gpio - gpio device private data structure
+ * @chip: instance of the gpio_chip
+ * @base_addr: base address of the GPIO device
+ * @clk: clock resource for this controller
+ */
+struct zynq_gpio {
+ struct gpio_chip chip;
+ void __iomem *base_addr;
+ struct clk *clk;
+};
+
+/**
+ * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank
+ * for a given pin in the GPIO device
+ * @pin_num: gpio pin number within the device
+ * @bank_num: an output parameter used to return the bank number of the gpio
+ * pin
+ * @bank_pin_num: an output parameter used to return pin number within a bank
+ * for the given gpio pin
+ *
+ * The bank number and pin offset within the bank are returned via the output parameters.
+ */
+static inline void zynq_gpio_get_bank_pin(unsigned int pin_num,
+ unsigned int *bank_num,
+ unsigned int *bank_pin_num)
+{
+ switch (pin_num) {
+ case ZYNQ_GPIO_BANK0_PIN_MIN ... ZYNQ_GPIO_BANK0_PIN_MAX:
+ *bank_num = 0;
+ *bank_pin_num = pin_num;
+ break;
+ case ZYNQ_GPIO_BANK1_PIN_MIN ... ZYNQ_GPIO_BANK1_PIN_MAX:
+ *bank_num = 1;
+ *bank_pin_num = pin_num - ZYNQ_GPIO_BANK1_PIN_MIN;
+ break;
+ case ZYNQ_GPIO_BANK2_PIN_MIN ... ZYNQ_GPIO_BANK2_PIN_MAX:
+ *bank_num = 2;
+ *bank_pin_num = pin_num - ZYNQ_GPIO_BANK2_PIN_MIN;
+ break;
+ case ZYNQ_GPIO_BANK3_PIN_MIN ... ZYNQ_GPIO_BANK3_PIN_MAX:
+ *bank_num = 3;
+ *bank_pin_num = pin_num - ZYNQ_GPIO_BANK3_PIN_MIN;
+ break;
+ default:
+ WARN(true, "invalid GPIO pin number: %u", pin_num);
+ *bank_num = 0;
+ *bank_pin_num = 0;
+ break;
+ }
+}
+
+/**
+ * zynq_gpio_get_value - Get the state of the specified pin of GPIO device
+ * @chip: gpio_chip instance to be worked on
+ * @pin: gpio pin number within the device
+ *
+ * This function reads the state of the specified pin of the GPIO device.
+ *
+ * Return: 0 if the pin is low, 1 if pin is high.
+ */
+static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
+{
+ u32 data;
+ unsigned int bank_num, bank_pin_num;
+ struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip);
+
+ zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num);
+
+ data = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_DATA_RO_OFFSET(bank_num));
+
+ return (data >> bank_pin_num) & 1;
+}
+
+/**
+ * zynq_gpio_set_value - Modify the state of the pin with specified value
+ * @chip: gpio_chip instance to be worked on
+ * @pin: gpio pin number within the device
+ * @state: value used to modify the state of the specified pin
+ *
+ * This function calculates the register offset (i.e. to the lower 16 bits or
+ * upper 16 bits) based on the given pin number and sets the state of a
+ * gpio pin to the specified value. Any non-zero state is written as 1.
+ */
+static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
+ int state)
+{
+ unsigned int reg_offset, bank_num, bank_pin_num;
+ struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip);
+
+ zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num);
+
+ if (bank_pin_num >= ZYNQ_GPIO_MID_PIN_NUM) {
+ /* only 16 data bits in bit maskable reg */
+ bank_pin_num -= ZYNQ_GPIO_MID_PIN_NUM;
+ reg_offset = ZYNQ_GPIO_DATA_MSW_OFFSET(bank_num);
+ } else {
+ reg_offset = ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num);
+ }
+
+ /*
+ * get the 32 bit value to be written to the mask/data register where
+ * the upper 16 bits is the mask and lower 16 bits is the data
+ */
+ state = !!state;
+ state = ~(1 << (bank_pin_num + ZYNQ_GPIO_MID_PIN_NUM)) &
+ ((state << bank_pin_num) | ZYNQ_GPIO_UPPER_MASK);
+
+ writel_relaxed(state, gpio->base_addr + reg_offset);
+}
+
+/**
+ * zynq_gpio_dir_in - Set the direction of the specified GPIO pin as input
+ * @chip: gpio_chip instance to be worked on
+ * @pin: gpio pin number within the device
+ *
+ * This function uses the read-modify-write sequence to set the direction of
+ * the gpio pin as input.
+ *
+ * Return: 0 always
+ */
+static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
+{
+ u32 reg;
+ unsigned int bank_num, bank_pin_num;
+ struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip);
+
+ zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num);
+
+ /* bank 0 pins 7 and 8 are special and cannot be used as inputs */
+ if (bank_num == 0 && (bank_pin_num == 7 || bank_pin_num == 8))
+ return -EINVAL;
+
+ /* clear the bit in direction mode reg to set the pin as input */
+ reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+ reg &= ~BIT(bank_pin_num);
+ writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+
+ return 0;
+}
+
+/**
+ * zynq_gpio_dir_out - Set the direction of the specified GPIO pin as output
+ * @chip: gpio_chip instance to be worked on
+ * @pin: gpio pin number within the device
+ * @state: value to be written to specified pin
+ *
+ * This function sets the direction of specified GPIO pin as output, configures
+ * the Output Enable register for the pin and uses zynq_gpio_set to set
+ * the state of the pin to the value specified.
+ *
+ * Return: 0 always
+ */
+static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
+ int state)
+{
+ u32 reg;
+ unsigned int bank_num, bank_pin_num;
+ struct zynq_gpio *gpio = container_of(chip, struct zynq_gpio, chip);
+
+ zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num);
+
+ /* set the GPIO pin as output */
+ reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+ reg |= BIT(bank_pin_num);
+ writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+
+ /* configure the output enable reg for the pin */
+ reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num));
+ reg |= BIT(bank_pin_num);
+ writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num));
+
+ /* set the state of the pin */
+ zynq_gpio_set_value(chip, pin, state);
+ return 0;
+}
+
+/**
+ * zynq_gpio_irq_mask - Disable the interrupts for a gpio pin
+ * @irq_data: per irq and chip data passed down to chip functions
+ *
+ * This function calculates gpio pin number from irq number and sets the
+ * bit in the Interrupt Disable register of the corresponding bank to disable
+ * interrupts for that pin.
+ */
+static void zynq_gpio_irq_mask(struct irq_data *irq_data)
+{
+ unsigned int device_pin_num, bank_num, bank_pin_num;
+ struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+
+ device_pin_num = irq_data->hwirq;
+ zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num);
+ writel_relaxed(BIT(bank_pin_num),
+ gpio->base_addr + ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
+}
+
+/**
+ * zynq_gpio_irq_unmask - Enable the interrupts for a gpio pin
+ * @irq_data: irq data containing irq number of gpio pin for the interrupt
+ * to enable
+ *
+ * This function calculates the gpio pin number from irq number and sets the
+ * bit in the Interrupt Enable register of the corresponding bank to enable
+ * interrupts for that pin.
+ */
+static void zynq_gpio_irq_unmask(struct irq_data *irq_data)
+{
+ unsigned int device_pin_num, bank_num, bank_pin_num;
+ struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+
+ device_pin_num = irq_data->hwirq;
+ zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num);
+ writel_relaxed(BIT(bank_pin_num),
+ gpio->base_addr + ZYNQ_GPIO_INTEN_OFFSET(bank_num));
+}
+
+/**
+ * zynq_gpio_irq_ack - Acknowledge the interrupt of a gpio pin
+ * @irq_data: irq data containing irq number of gpio pin for the interrupt
+ * to ack
+ *
+ * This function calculates gpio pin number from irq number and sets the bit
+ * in the Interrupt Status Register of the corresponding bank, to ACK the irq.
+ */
+static void zynq_gpio_irq_ack(struct irq_data *irq_data)
+{
+ unsigned int device_pin_num, bank_num, bank_pin_num;
+ struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+
+ device_pin_num = irq_data->hwirq;
+ zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num);
+ writel_relaxed(BIT(bank_pin_num),
+ gpio->base_addr + ZYNQ_GPIO_INTSTS_OFFSET(bank_num));
+}
+
+/**
+ * zynq_gpio_irq_enable - Enable the interrupts for a gpio pin
+ * @irq_data: irq data containing irq number of gpio pin for the interrupt
+ * to enable
+ *
+ * Clears the INTSTS bit and unmasks the given interrupt.
+ */
+static void zynq_gpio_irq_enable(struct irq_data *irq_data)
+{
+ /*
+ * The Zynq GPIO controller does not disable interrupt detection when
+ * the interrupt is masked and only disables the propagation of the
+ * interrupt. This means when the controller detects an interrupt
+ * condition while the interrupt is logically disabled it will propagate
+ * that interrupt event once the interrupt is enabled. This will cause
+ * the interrupt consumer to see spurious interrupts. To prevent this,
+ * first make sure that the interrupt is not asserted and then enable
+ * it.
+ */
+ zynq_gpio_irq_ack(irq_data);
+ zynq_gpio_irq_unmask(irq_data);
+}
+
+/**
+ * zynq_gpio_set_irq_type - Set the irq type for a gpio pin
+ * @irq_data: irq data containing irq number of gpio pin
+ * @type: interrupt type that is to be set for the gpio pin
+ *
+ * This function gets the gpio pin number and its bank from the gpio pin number
+ * and configures the INT_TYPE, INT_POLARITY and INT_ANY registers.
+ *
+ * Return: 0, negative error otherwise.
+ * TYPE-EDGE_RISING, INT_TYPE - 1, INT_POLARITY - 1, INT_ANY - 0;
+ * TYPE-EDGE_FALLING, INT_TYPE - 1, INT_POLARITY - 0, INT_ANY - 0;
+ * TYPE-EDGE_BOTH, INT_TYPE - 1, INT_POLARITY - NA, INT_ANY - 1;
+ * TYPE-LEVEL_HIGH, INT_TYPE - 0, INT_POLARITY - 1, INT_ANY - NA;
+ * TYPE-LEVEL_LOW, INT_TYPE - 0, INT_POLARITY - 0, INT_ANY - NA
+ */
+static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type)
+{
+ u32 int_type, int_pol, int_any;
+ unsigned int device_pin_num, bank_num, bank_pin_num;
+ struct zynq_gpio *gpio = irq_data_get_irq_chip_data(irq_data);
+
+ device_pin_num = irq_data->hwirq;
+ zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num);
+
+ int_type = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
+ int_pol = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
+ int_any = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+
+ /*
+ * based on the type requested, configure the INT_TYPE, INT_POLARITY
+ * and INT_ANY registers
+ */
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ int_type |= BIT(bank_pin_num);
+ int_pol |= BIT(bank_pin_num);
+ int_any &= ~BIT(bank_pin_num);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ int_type |= BIT(bank_pin_num);
+ int_pol &= ~BIT(bank_pin_num);
+ int_any &= ~BIT(bank_pin_num);
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ int_type |= BIT(bank_pin_num);
+ int_any |= BIT(bank_pin_num);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ int_type &= ~BIT(bank_pin_num);
+ int_pol |= BIT(bank_pin_num);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ int_type &= ~BIT(bank_pin_num);
+ int_pol &= ~BIT(bank_pin_num);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(int_type,
+ gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
+ writel_relaxed(int_pol,
+ gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num));
+ writel_relaxed(int_any,
+ gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num));
+ return 0;
+}
+
+static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
+{
+ if (on)
+ zynq_gpio_irq_unmask(data);
+ else
+ zynq_gpio_irq_mask(data);
+
+ return 0;
+}
+
+/* irq chip descriptor */
+static struct irq_chip zynq_gpio_irqchip = {
+ .name = DRIVER_NAME,
+ .irq_enable = zynq_gpio_irq_enable,
+ .irq_mask = zynq_gpio_irq_mask,
+ .irq_unmask = zynq_gpio_irq_unmask,
+ .irq_set_type = zynq_gpio_set_irq_type,
+ .irq_set_wake = zynq_gpio_set_wake,
+};
+
+/**
+ * zynq_gpio_irqhandler - IRQ handler for the gpio banks of a gpio device
+ * @irq: irq number of the gpio bank where interrupt has occurred
+ * @desc: irq descriptor instance of the 'irq'
+ *
+ * This function reads the Interrupt Status Register of each bank to get the
+ * gpio pin number which has triggered an interrupt. It then acks the triggered
+ * interrupt and calls the pin specific handler set by the higher layer
+ * application for that pin.
+ * Note: A bug is reported if no handler is set for the gpio pin.
+ */
+static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc)
+{
+ u32 int_sts, int_enb;
+ unsigned int bank_num;
+ struct zynq_gpio *gpio = irq_get_handler_data(irq);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
+
+ chained_irq_enter(irqchip, desc);
+
+ for (bank_num = 0; bank_num < ZYNQ_GPIO_MAX_BANK; bank_num++) {
+ int_sts = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTSTS_OFFSET(bank_num));
+ int_enb = readl_relaxed(gpio->base_addr +
+ ZYNQ_GPIO_INTMASK_OFFSET(bank_num));
+ int_sts &= ~int_enb;
+ if (int_sts) {
+ int offset;
+ unsigned long pending = int_sts;
+
+ for_each_set_bit(offset, &pending, 32) {
+ unsigned int gpio_irq =
+ irq_find_mapping(gpio->chip.irqdomain,
+ offset);
+ generic_handle_irq(gpio_irq);
+ }
+
+ /* clear IRQ in HW */
+ writel_relaxed(int_sts, gpio->base_addr +
+ ZYNQ_GPIO_INTSTS_OFFSET(bank_num));
+ }
+ }
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static int __maybe_unused zynq_gpio_suspend(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_suspend(dev);
+
+ return 0;
+}
+
+static int __maybe_unused zynq_gpio_resume(struct device *dev)
+{
+ if (!device_may_wakeup(dev))
+ return pm_runtime_force_resume(dev);
+
+ return 0;
+}
+
+static int __maybe_unused zynq_gpio_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct zynq_gpio *gpio = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(gpio->clk);
+
+ return 0;
+}
+
+static int __maybe_unused zynq_gpio_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct zynq_gpio *gpio = platform_get_drvdata(pdev);
+
+ return clk_prepare_enable(gpio->clk);
+}
+
+static int zynq_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(chip->dev);
+
+ /*
+ * If the device is already active, pm_runtime_get_sync() will return 1 on
+ * success, but gpio_request still needs to return 0.
+ */
+ return ret < 0 ? ret : 0;
+}
+
+static void zynq_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pm_runtime_put(chip->dev);
+}
+
+static const struct dev_pm_ops zynq_gpio_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume)
+ SET_PM_RUNTIME_PM_OPS(zynq_gpio_runtime_suspend,
+ zynq_gpio_runtime_resume, NULL)
+};
+
+/**
+ * zynq_gpio_probe - Initialization method for a zynq_gpio device
+ * @pdev: platform device instance
+ *
+ * This function allocates memory resources for the gpio device and registers
+ * all the banks of the device. It will also set up interrupts for the gpio
+ * pins.
+ * Note: Interrupts are disabled for all the banks during initialization.
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+static int zynq_gpio_probe(struct platform_device *pdev)
+{
+ int ret, bank_num, irq;
+ struct zynq_gpio *gpio;
+ struct gpio_chip *chip;
+ struct resource *res;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, gpio);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpio->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gpio->base_addr))
+ return PTR_ERR(gpio->base_addr);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "invalid IRQ\n");
+ return irq;
+ }
+
+ /* configure the gpio chip */
+ chip = &gpio->chip;
+ chip->label = "zynq_gpio";
+ chip->owner = THIS_MODULE;
+ chip->dev = &pdev->dev;
+ chip->get = zynq_gpio_get_value;
+ chip->set = zynq_gpio_set_value;
+ chip->request = zynq_gpio_request;
+ chip->free = zynq_gpio_free;
+ chip->direction_input = zynq_gpio_dir_in;
+ chip->direction_output = zynq_gpio_dir_out;
+ chip->base = -1;
+ chip->ngpio = ZYNQ_GPIO_NR_GPIOS;
+
+ /* Enable GPIO clock */
+ gpio->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(gpio->clk)) {
+ dev_err(&pdev->dev, "input clock not found.\n");
+ return PTR_ERR(gpio->clk);
+ }
+ ret = clk_prepare_enable(gpio->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
+ /* register the gpio chip; log and bail out if registration fails */
+ ret = gpiochip_add(chip);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add gpio chip\n");
+ goto err_disable_clk;
+ }
+
+ /* disable interrupts for all banks */
+ for (bank_num = 0; bank_num < ZYNQ_GPIO_MAX_BANK; bank_num++)
+ writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
+ ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
+
+ ret = gpiochip_irqchip_add(chip, &zynq_gpio_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add irq chip\n");
+ goto err_rm_gpiochip;
+ }
+
+ gpiochip_set_chained_irqchip(chip, &zynq_gpio_irqchip, irq,
+ zynq_gpio_irqhandler);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ return 0;
+
+err_rm_gpiochip:
+ if (gpiochip_remove(chip))
+ dev_err(&pdev->dev, "Failed to remove gpio chip\n");
+err_disable_clk:
+ clk_disable_unprepare(gpio->clk);
+
+ return ret;
+}
+
+/**
+ * zynq_gpio_remove - Driver removal function
+ * @pdev: platform device instance
+ *
+ * Return: 0 always
+ */
+static int zynq_gpio_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct zynq_gpio *gpio = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ ret = gpiochip_remove(&gpio->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to remove gpio chip\n");
+ return ret;
+ }
+ clk_disable_unprepare(gpio->clk);
+ device_set_wakeup_capable(&pdev->dev, 0);
+ return 0;
+}
+
+static struct of_device_id zynq_gpio_of_match[] = {
+ { .compatible = "xlnx,zynq-gpio-1.0", },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, zynq_gpio_of_match);
+
+static struct platform_driver zynq_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &zynq_gpio_dev_pm_ops,
+ .of_match_table = zynq_gpio_of_match,
+ },
+ .probe = zynq_gpio_probe,
+ .remove = zynq_gpio_remove,
+};
+
+/**
+ * zynq_gpio_init - Initial driver registration call
+ *
+ * Return: value from platform_driver_register
+ */
+static int __init zynq_gpio_init(void)
+{
+ return platform_driver_register(&zynq_gpio_driver);
+}
+postcore_initcall(zynq_gpio_init);
+
+MODULE_AUTHOR("Xilinx Inc.");
+MODULE_DESCRIPTION("Zynq GPIO driver");
+MODULE_LICENSE("GPL");
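A worked example of the maskable data write built in zynq_gpio_set_value() above, using assumed pin values; the foo_ helper name is hypothetical. The upper 16 bits of the LSW/MSW register carry a per-bit write mask (0 selects the bit to update) and the lower 16 bits carry the data.

#include <linux/types.h>

/* same constants as the driver: 16 pins per maskable register */
#define FOO_MID_PIN_NUM	16
#define FOO_UPPER_MASK	0xFFFF0000

static u32 foo_maskable_word(unsigned int bank_pin_num, int state)
{
	u32 word;

	/* caller already reduced bank_pin_num to 0..15 for the MSW register */
	word = ((!!state) << bank_pin_num) | FOO_UPPER_MASK;

	/* clear the mask bit for this pin so only this bit is written */
	word &= ~(1 << (bank_pin_num + FOO_MID_PIN_NUM));

	return word;
}

/*
 * e.g. device pin 53 is bank 1, bank pin 21: the MSW register is used with
 * bank_pin_num reduced to 5, and driving it high yields 0xFFDF0020
 * (mask bit for pin 5 cleared in the upper half, data bit 5 set).
 */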
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 4a987917c186..d62eaaa75397 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -157,7 +157,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc);
- ret = gpiod_lock_as_irq(desc);
+ ret = gpio_lock_as_irq(chip, pin);
if (ret) {
dev_err(chip->dev, "Failed to lock GPIO as interrupt\n");
goto fail_free_desc;
@@ -212,7 +212,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
fail_free_event:
kfree(event);
fail_unlock_irq:
- gpiod_unlock_as_irq(desc);
+ gpio_unlock_as_irq(chip, pin);
fail_free_desc:
gpiochip_free_own_desc(desc);
@@ -221,7 +221,7 @@ fail_free_desc:
/**
* acpi_gpiochip_request_interrupts() - Register isr for gpio chip ACPI events
- * @acpi_gpio: ACPI GPIO chip
+ * @chip: GPIO chip
*
* ACPI5 platforms can use GPIO signaled ACPI events. These GPIO interrupts are
* handled by ACPI event methods which need to be called from the GPIO
@@ -229,11 +229,21 @@ fail_free_desc:
* gpio pins have acpi event methods and assigns interrupt handlers that call
* the acpi event methods for those pins.
*/
-static void acpi_gpiochip_request_interrupts(struct acpi_gpio_chip *acpi_gpio)
+void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
{
- struct gpio_chip *chip = acpi_gpio->chip;
+ struct acpi_gpio_chip *acpi_gpio;
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!chip->dev || !chip->to_irq)
+ return;
- if (!chip->to_irq)
+ handle = ACPI_HANDLE(chip->dev);
+ if (!handle)
+ return;
+
+ status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
+ if (ACPI_FAILURE(status))
return;
INIT_LIST_HEAD(&acpi_gpio->events);
@@ -243,17 +253,27 @@ static void acpi_gpiochip_request_interrupts(struct acpi_gpio_chip *acpi_gpio)
/**
* acpi_gpiochip_free_interrupts() - Free GPIO ACPI event interrupts.
- * @acpi_gpio: ACPI GPIO chip
+ * @chip: GPIO chip
*
* Free interrupts associated with GPIO ACPI event method for the given
* GPIO chip.
*/
-static void acpi_gpiochip_free_interrupts(struct acpi_gpio_chip *acpi_gpio)
+void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
{
+ struct acpi_gpio_chip *acpi_gpio;
struct acpi_gpio_event *event, *ep;
- struct gpio_chip *chip = acpi_gpio->chip;
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!chip->dev || !chip->to_irq)
+ return;
- if (!chip->to_irq)
+ handle = ACPI_HANDLE(chip->dev);
+ if (!handle)
+ return;
+
+ status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
+ if (ACPI_FAILURE(status))
return;
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
@@ -263,7 +283,7 @@ static void acpi_gpiochip_free_interrupts(struct acpi_gpio_chip *acpi_gpio)
desc = gpiochip_get_desc(chip, event->pin);
if (WARN_ON(IS_ERR(desc)))
continue;
- gpiod_unlock_as_irq(desc);
+ gpio_unlock_as_irq(chip, event->pin);
gpiochip_free_own_desc(desc);
list_del(&event->node);
kfree(event);
@@ -525,7 +545,6 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
return;
}
- acpi_gpiochip_request_interrupts(acpi_gpio);
acpi_gpiochip_request_regions(acpi_gpio);
}
@@ -549,7 +568,6 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
}
acpi_gpiochip_free_regions(acpi_gpio);
- acpi_gpiochip_free_interrupts(acpi_gpio);
acpi_detach_data(handle, acpi_gpio_chip_dh);
kfree(acpi_gpio);
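The exported acpi_gpiochip_request_interrupts()/acpi_gpiochip_free_interrupts() now locate their private data from the gpio_chip itself. A condensed sketch of that lookup, using the acpi_gpio_chip_dh attach handler already referenced in the hunks above (the foo_ name is hypothetical):

#include <linux/acpi.h>
#include <linux/gpio/driver.h>

static struct acpi_gpio_chip *foo_find_acpi_gpio(struct gpio_chip *chip)
{
	struct acpi_gpio_chip *acpi_gpio;
	acpi_handle handle;
	acpi_status status;

	if (!chip->dev)
		return NULL;

	handle = ACPI_HANDLE(chip->dev);
	if (!handle)
		return NULL;

	/* acpi_gpio_chip_dh is the handler gpiolib-acpi.c attaches the data with */
	status = acpi_get_data(handle, acpi_gpio_chip_dh, (void **)&acpi_gpio);
	if (ACPI_FAILURE(status))
		return NULL;

	return acpi_gpio;
}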
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
new file mode 100644
index 000000000000..078ae6c2df79
--- /dev/null
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -0,0 +1,102 @@
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+
+#include <linux/gpio.h>
+
+#include "gpiolib.h"
+
+void gpio_free(unsigned gpio)
+{
+ gpiod_free(gpio_to_desc(gpio));
+}
+EXPORT_SYMBOL_GPL(gpio_free);
+
+/**
+ * gpio_request_one - request a single GPIO with initial configuration
+ * @gpio: the GPIO number
+ * @flags: GPIO configuration as specified by GPIOF_*
+ * @label: a literal description string of this GPIO
+ */
+int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
+{
+ struct gpio_desc *desc;
+ int err;
+
+ desc = gpio_to_desc(gpio);
+
+ err = gpiod_request(desc, label);
+ if (err)
+ return err;
+
+ if (flags & GPIOF_OPEN_DRAIN)
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+
+ if (flags & GPIOF_OPEN_SOURCE)
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
+ if (flags & GPIOF_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+
+ if (flags & GPIOF_DIR_IN)
+ err = gpiod_direction_input(desc);
+ else
+ err = gpiod_direction_output_raw(desc,
+ (flags & GPIOF_INIT_HIGH) ? 1 : 0);
+
+ if (err)
+ goto free_gpio;
+
+ if (flags & GPIOF_EXPORT) {
+ err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE);
+ if (err)
+ goto free_gpio;
+ }
+
+ return 0;
+
+ free_gpio:
+ gpiod_free(desc);
+ return err;
+}
+EXPORT_SYMBOL_GPL(gpio_request_one);
+
+int gpio_request(unsigned gpio, const char *label)
+{
+ return gpiod_request(gpio_to_desc(gpio), label);
+}
+EXPORT_SYMBOL_GPL(gpio_request);
+
+/**
+ * gpio_request_array - request multiple GPIOs in a single call
+ * @array: array of the 'struct gpio'
+ * @num: how many GPIOs in the array
+ */
+int gpio_request_array(const struct gpio *array, size_t num)
+{
+ int i, err;
+
+ for (i = 0; i < num; i++, array++) {
+ err = gpio_request_one(array->gpio, array->flags, array->label);
+ if (err)
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ while (i--)
+ gpio_free((--array)->gpio);
+ return err;
+}
+EXPORT_SYMBOL_GPL(gpio_request_array);
+
+/**
+ * gpio_free_array - release multiple GPIOs in a single call
+ * @array: array of the 'struct gpio'
+ * @num: how many GPIOs in the array
+ */
+void gpio_free_array(const struct gpio *array, size_t num)
+{
+ while (num--)
+ gpio_free((array++)->gpio);
+}
+EXPORT_SYMBOL_GPL(gpio_free_array);
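A short usage sketch for the legacy helpers split out into gpiolib-legacy.c above (the GPIO numbers and labels are made up): gpio_request_one() combines request, configuration via GPIOF_* flags and optional sysfs export, while gpio_request_array() applies the same over a table and unwinds on failure.

#include <linux/gpio.h>
#include <linux/kernel.h>

static const struct gpio foo_gpios[] = {
	{ 42, GPIOF_OUT_INIT_LOW, "foo-reset" },
	{ 43, GPIOF_IN,           "foo-ready" },
};

static int foo_claim_gpios(void)
{
	int err;

	err = gpio_request_one(44, GPIOF_OUT_INIT_HIGH, "foo-enable");
	if (err)
		return err;

	err = gpio_request_array(foo_gpios, ARRAY_SIZE(foo_gpios));
	if (err)
		gpio_free(44);	/* the array helper already unwound its own */

	return err;
}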
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index af7e25c9a9ae..7cfdc2278905 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -23,7 +23,7 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/slab.h>
-struct gpio_desc;
+#include "gpiolib.h"
/* Private data structure for of_gpiochip_find_and_xlate */
struct gg_data {
@@ -82,19 +82,19 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
&gg_data.gpiospec);
if (ret) {
- pr_debug("%s: can't parse gpios property of node '%s[%d]'\n",
- __func__, np->full_name, index);
+ pr_debug("%s: can't parse '%s' property of node '%s[%d]'\n",
+ __func__, propname, np->full_name, index);
return ERR_PTR(ret);
}
gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
of_node_put(gg_data.gpiospec.np);
- pr_debug("%s exited with status %d\n", __func__,
+ pr_debug("%s: parsed '%s' property of node '%s[%d]' - status (%d)\n",
+ __func__, propname, np->full_name, index,
PTR_ERR_OR_ZERO(gg_data.out_gpio));
return gg_data.out_gpio;
}
-EXPORT_SYMBOL(of_get_named_gpiod_flags);
int of_get_named_gpio_flags(struct device_node *np, const char *list_name,
int index, enum of_gpio_flags *flags)
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
new file mode 100644
index 000000000000..5f2150b619a7
--- /dev/null
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -0,0 +1,827 @@
+#include <linux/idr.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/kdev_t.h>
+
+#include "gpiolib.h"
+
+static DEFINE_IDR(dirent_idr);
+
+
+/* lock protects against unexport_gpio() being called while
+ * sysfs files are active.
+ */
+static DEFINE_MUTEX(sysfs_lock);
+
+/*
+ * /sys/class/gpio/gpioN... only for GPIOs that are exported
+ * /direction
+ * * MAY BE OMITTED if kernel won't allow direction changes
+ * * is read/write as "in" or "out"
+ * * may also be written as "high" or "low", initializing
+ * output value as specified ("out" implies "low")
+ * /value
+ * * always readable, subject to hardware behavior
+ * * may be writable, as zero/nonzero
+ * /edge
+ * * configures behavior of poll(2) on /value
+ * * available only if pin can generate IRQs on input
+ * * is read/write as "none", "falling", "rising", or "both"
+ * /active_low
+ * * configures polarity of /value
+ * * is read/write as zero/nonzero
+ * * also affects existing and subsequent "falling" and "rising"
+ * /edge configuration
+ */
+
+static ssize_t gpio_direction_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
+ status = -EIO;
+ } else {
+ gpiod_get_direction(desc);
+ status = sprintf(buf, "%s\n",
+ test_bit(FLAG_IS_OUT, &desc->flags)
+ ? "out" : "in");
+ }
+
+ mutex_unlock(&sysfs_lock);
+ return status;
+}
+
+static ssize_t gpio_direction_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else if (sysfs_streq(buf, "high"))
+ status = gpiod_direction_output_raw(desc, 1);
+ else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low"))
+ status = gpiod_direction_output_raw(desc, 0);
+ else if (sysfs_streq(buf, "in"))
+ status = gpiod_direction_input(desc);
+ else
+ status = -EINVAL;
+
+ mutex_unlock(&sysfs_lock);
+ return status ? : size;
+}
+
+static /* const */ DEVICE_ATTR(direction, 0644,
+ gpio_direction_show, gpio_direction_store);
+
+static ssize_t gpio_value_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else
+ status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc));
+
+ mutex_unlock(&sysfs_lock);
+ return status;
+}
+
+static ssize_t gpio_value_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else if (!test_bit(FLAG_IS_OUT, &desc->flags))
+ status = -EPERM;
+ else {
+ long value;
+
+ status = kstrtol(buf, 0, &value);
+ if (status == 0) {
+ gpiod_set_value_cansleep(desc, value);
+ status = size;
+ }
+ }
+
+ mutex_unlock(&sysfs_lock);
+ return status;
+}
+
+static const DEVICE_ATTR(value, 0644,
+ gpio_value_show, gpio_value_store);
+
+static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
+{
+ struct kernfs_node *value_sd = priv;
+
+ sysfs_notify_dirent(value_sd);
+ return IRQ_HANDLED;
+}
+
+static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
+ unsigned long gpio_flags)
+{
+ struct kernfs_node *value_sd;
+ unsigned long irq_flags;
+ int ret, irq, id;
+
+ if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags)
+ return 0;
+
+ irq = gpiod_to_irq(desc);
+ if (irq < 0)
+ return -EIO;
+
+ id = desc->flags >> ID_SHIFT;
+ value_sd = idr_find(&dirent_idr, id);
+ if (value_sd)
+ free_irq(irq, value_sd);
+
+ desc->flags &= ~GPIO_TRIGGER_MASK;
+
+ if (!gpio_flags) {
+ gpio_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ ret = 0;
+ goto free_id;
+ }
+
+ irq_flags = IRQF_SHARED;
+ if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
+ irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
+ if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
+ irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+
+ if (!value_sd) {
+ value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
+ if (!value_sd) {
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto free_sd;
+ id = ret;
+
+ desc->flags &= GPIO_FLAGS_MASK;
+ desc->flags |= (unsigned long)id << ID_SHIFT;
+
+ if (desc->flags >> ID_SHIFT != id) {
+ ret = -ERANGE;
+ goto free_id;
+ }
+ }
+
+ ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags,
+ "gpiolib", value_sd);
+ if (ret < 0)
+ goto free_id;
+
+ ret = gpio_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc));
+ if (ret < 0) {
+ gpiod_warn(desc, "failed to flag the GPIO for IRQ\n");
+ goto free_id;
+ }
+
+ desc->flags |= gpio_flags;
+ return 0;
+
+free_id:
+ idr_remove(&dirent_idr, id);
+ desc->flags &= GPIO_FLAGS_MASK;
+free_sd:
+ if (value_sd)
+ sysfs_put(value_sd);
+err_out:
+ return ret;
+}
+
+static const struct {
+ const char *name;
+ unsigned long flags;
+} trigger_types[] = {
+ { "none", 0 },
+ { "falling", BIT(FLAG_TRIG_FALL) },
+ { "rising", BIT(FLAG_TRIG_RISE) },
+ { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) },
+};
+
+static ssize_t gpio_edge_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else {
+ int i;
+
+ status = 0;
+ for (i = 0; i < ARRAY_SIZE(trigger_types); i++)
+ if ((desc->flags & GPIO_TRIGGER_MASK)
+ == trigger_types[i].flags) {
+ status = sprintf(buf, "%s\n",
+ trigger_types[i].name);
+ break;
+ }
+ }
+
+ mutex_unlock(&sysfs_lock);
+ return status;
+}
+
+static ssize_t gpio_edge_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(trigger_types); i++)
+ if (sysfs_streq(trigger_types[i].name, buf))
+ goto found;
+ return -EINVAL;
+
+found:
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else {
+ status = gpio_setup_irq(desc, dev, trigger_types[i].flags);
+ if (!status)
+ status = size;
+ }
+
+ mutex_unlock(&sysfs_lock);
+
+ return status;
+}
+
+static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
+
+static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
+ int value)
+{
+ int status = 0;
+
+ if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
+ return 0;
+
+ if (value)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ else
+ clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
+
+ /* reconfigure poll(2) support if enabled on one edge only */
+ if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
+ !!test_bit(FLAG_TRIG_FALL, &desc->flags))) {
+ unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;
+
+ gpio_setup_irq(desc, dev, 0);
+ status = gpio_setup_irq(desc, dev, trigger_flags);
+ }
+
+ return status;
+}
+
+static ssize_t gpio_active_low_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags))
+ status = -EIO;
+ else
+ status = sprintf(buf, "%d\n",
+ !!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
+
+ mutex_unlock(&sysfs_lock);
+
+ return status;
+}
+
+static ssize_t gpio_active_low_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ ssize_t status;
+
+ mutex_lock(&sysfs_lock);
+
+ if (!test_bit(FLAG_EXPORT, &desc->flags)) {
+ status = -EIO;
+ } else {
+ long value;
+
+ status = kstrtol(buf, 0, &value);
+ if (status == 0)
+ status = sysfs_set_active_low(desc, dev, value != 0);
+ }
+
+ mutex_unlock(&sysfs_lock);
+
+ return status ? : size;
+}
+
+static const DEVICE_ATTR(active_low, 0644,
+ gpio_active_low_show, gpio_active_low_store);
+
+static const struct attribute *gpio_attrs[] = {
+ &dev_attr_value.attr,
+ &dev_attr_active_low.attr,
+ NULL,
+};
+
+static const struct attribute_group gpio_attr_group = {
+ .attrs = (struct attribute **) gpio_attrs,
+};
+
+/*
+ * /sys/class/gpio/gpiochipN/
+ * /base ... matching gpio_chip.base (N)
+ * /label ... matching gpio_chip.label
+ * /ngpio ... matching gpio_chip.ngpio
+ */
+
+static ssize_t chip_base_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", chip->base);
+}
+static DEVICE_ATTR(base, 0444, chip_base_show, NULL);
+
+static ssize_t chip_label_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", chip->label ? : "");
+}
+static DEVICE_ATTR(label, 0444, chip_label_show, NULL);
+
+static ssize_t chip_ngpio_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct gpio_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", chip->ngpio);
+}
+static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
+
+static const struct attribute *gpiochip_attrs[] = {
+ &dev_attr_base.attr,
+ &dev_attr_label.attr,
+ &dev_attr_ngpio.attr,
+ NULL,
+};
+
+static const struct attribute_group gpiochip_attr_group = {
+ .attrs = (struct attribute **) gpiochip_attrs,
+};
+
+/*
+ * /sys/class/gpio/export ... write-only
+ * integer N ... number of GPIO to export (full access)
+ * /sys/class/gpio/unexport ... write-only
+ * integer N ... number of GPIO to unexport
+ */
+static ssize_t export_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t len)
+{
+ long gpio;
+ struct gpio_desc *desc;
+ int status;
+
+ status = kstrtol(buf, 0, &gpio);
+ if (status < 0)
+ goto done;
+
+ desc = gpio_to_desc(gpio);
+ /* reject invalid GPIOs */
+ if (!desc) {
+ pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ return -EINVAL;
+ }
+
+ /* No extra locking here; FLAG_SYSFS just signifies that the
+ * request and export were done on behalf of userspace, so
+ * they may be undone on its behalf too.
+ */
+
+ status = gpiod_request(desc, "sysfs");
+ if (status < 0) {
+ if (status == -EPROBE_DEFER)
+ status = -ENODEV;
+ goto done;
+ }
+ status = gpiod_export(desc, true);
+ if (status < 0)
+ gpiod_free(desc);
+ else
+ set_bit(FLAG_SYSFS, &desc->flags);
+
+done:
+ if (status)
+ pr_debug("%s: status %d\n", __func__, status);
+ return status ? : len;
+}
+
+static ssize_t unexport_store(struct class *class,
+ struct class_attribute *attr,
+ const char *buf, size_t len)
+{
+ long gpio;
+ struct gpio_desc *desc;
+ int status;
+
+ status = kstrtol(buf, 0, &gpio);
+ if (status < 0)
+ goto done;
+
+ desc = gpio_to_desc(gpio);
+ /* reject bogus commands (gpio_unexport ignores them) */
+ if (!desc) {
+ pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
+ return -EINVAL;
+ }
+
+ status = -EINVAL;
+
+ /* No extra locking here; FLAG_SYSFS just signifies that the
+ * request and export were done on behalf of userspace, so
+ * they may be undone on its behalf too.
+ */
+ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) {
+ status = 0;
+ gpiod_free(desc);
+ }
+done:
+ if (status)
+ pr_debug("%s: status %d\n", __func__, status);
+ return status ? : len;
+}
+
+static struct class_attribute gpio_class_attrs[] = {
+ __ATTR(export, 0200, NULL, export_store),
+ __ATTR(unexport, 0200, NULL, unexport_store),
+ __ATTR_NULL,
+};
+
+static struct class gpio_class = {
+ .name = "gpio",
+ .owner = THIS_MODULE,
+
+ .class_attrs = gpio_class_attrs,
+};
+
+
+/**
+ * gpiod_export - export a GPIO through sysfs
+ * @gpio: gpio to make available, already requested
+ * @direction_may_change: true if userspace may change gpio direction
+ * Context: arch_initcall or later
+ *
+ * When drivers want to make a GPIO accessible to userspace after they
+ * have requested it -- perhaps while debugging, or as part of their
+ * public interface -- they may use this routine. If the GPIO can
+ * change direction (some can't) and the caller allows it, userspace
+ * will see "direction" sysfs attribute which may be used to change
+ * the gpio's direction. A "value" attribute will always be provided.
+ *
+ * Returns zero on success, else an error.
+ */
+int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
+{
+ unsigned long flags;
+ int status;
+ const char *ioname = NULL;
+ struct device *dev;
+ int offset;
+
+ /* can't export until sysfs is available ... */
+ if (!gpio_class.p) {
+ pr_debug("%s: called too early!\n", __func__);
+ return -ENOENT;
+ }
+
+ if (!desc) {
+ pr_debug("%s: invalid gpio descriptor\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sysfs_lock);
+
+ spin_lock_irqsave(&gpio_lock, flags);
+ if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
+ test_bit(FLAG_EXPORT, &desc->flags)) {
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n",
+ __func__,
+ test_bit(FLAG_REQUESTED, &desc->flags),
+ test_bit(FLAG_EXPORT, &desc->flags));
+ status = -EPERM;
+ goto fail_unlock;
+ }
+
+ if (!desc->chip->direction_input || !desc->chip->direction_output)
+ direction_may_change = false;
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+ offset = gpio_chip_hwgpio(desc);
+ if (desc->chip->names && desc->chip->names[offset])
+ ioname = desc->chip->names[offset];
+
+ dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
+ desc, ioname ? ioname : "gpio%u",
+ desc_to_gpio(desc));
+ if (IS_ERR(dev)) {
+ status = PTR_ERR(dev);
+ goto fail_unlock;
+ }
+
+ status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
+ if (status)
+ goto fail_unregister_device;
+
+ if (direction_may_change) {
+ status = device_create_file(dev, &dev_attr_direction);
+ if (status)
+ goto fail_unregister_device;
+ }
+
+ if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
+ !test_bit(FLAG_IS_OUT, &desc->flags))) {
+ status = device_create_file(dev, &dev_attr_edge);
+ if (status)
+ goto fail_unregister_device;
+ }
+
+ set_bit(FLAG_EXPORT, &desc->flags);
+ mutex_unlock(&sysfs_lock);
+ return 0;
+
+fail_unregister_device:
+ device_unregister(dev);
+fail_unlock:
+ mutex_unlock(&sysfs_lock);
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpiod_export);
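A hedged driver-side sketch of the gpiod_export()/gpiod_unexport() pair documented above; struct foo_priv, its ready_gpio member and the helper names are hypothetical, and the descriptor is assumed to have been requested already (for example via gpiod_get()):

#include <linux/device.h>
#include <linux/gpio/consumer.h>

struct foo_priv {			/* hypothetical driver state */
	struct gpio_desc *ready_gpio;	/* requested elsewhere in probe() */
};

static void foo_expose_ready_gpio(struct device *dev, struct foo_priv *priv)
{
	int ret;

	/* Make the line visible in sysfs; no "direction" attribute is
	 * created because userspace is not allowed to flip it here.
	 */
	ret = gpiod_export(priv->ready_gpio, false);
	if (ret)
		dev_warn(dev, "failed to export ready GPIO: %d\n", ret);
}

static void foo_hide_ready_gpio(struct foo_priv *priv)
{
	/* Also happens implicitly when the descriptor is freed */
	gpiod_unexport(priv->ready_gpio);
}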
+
+static int match_export(struct device *dev, const void *data)
+{
+ return dev_get_drvdata(dev) == data;
+}
+
+/**
+ * gpiod_export_link - create a sysfs link to an exported GPIO node
+ * @dev: device under which to create symlink
+ * @name: name of the symlink
+ * @desc: GPIO descriptor to create symlink to, already exported
+ *
+ * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN
+ * node. Caller is responsible for unlinking.
+ *
+ * Returns zero on success, else an error.
+ */
+int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc)
+{
+ int status = -EINVAL;
+
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sysfs_lock);
+
+ if (test_bit(FLAG_EXPORT, &desc->flags)) {
+ struct device *tdev;
+
+ tdev = class_find_device(&gpio_class, NULL, desc, match_export);
+ if (tdev != NULL) {
+ status = sysfs_create_link(&dev->kobj, &tdev->kobj,
+ name);
+ } else {
+ status = -ENODEV;
+ }
+ }
+
+ mutex_unlock(&sysfs_lock);
+
+ if (status)
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpiod_export_link);
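In the same hedged spirit, a short sketch of gpiod_export_link() giving the exported line a stable name under the consumer device rather than the global gpioN number; the "ready" link name is an arbitrary choice:

/* After a successful gpiod_export(desc, ...):
 * creates /sys/devices/.../<dev>/ready -> /sys/class/gpio/gpioN
 */
static int foo_link_ready_gpio(struct device *dev, struct gpio_desc *desc)
{
	return gpiod_export_link(dev, "ready", desc);
}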
+
+/**
+ * gpiod_sysfs_set_active_low - set the polarity of gpio sysfs value
+ * @desc: GPIO descriptor to change
+ * @value: non-zero to use active low, i.e. inverted values
+ *
+ * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute.
+ * The GPIO does not have to be exported yet. If poll(2) support has
+ * been enabled for either rising or falling edge, it will be
+ * reconfigured to follow the new polarity.
+ *
+ * Returns zero on success, else an error.
+ */
+int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
+{
+ struct device *dev = NULL;
+ int status = -EINVAL;
+
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&sysfs_lock);
+
+ if (test_bit(FLAG_EXPORT, &desc->flags)) {
+ dev = class_find_device(&gpio_class, NULL, desc, match_export);
+ if (dev == NULL) {
+ status = -ENODEV;
+ goto unlock;
+ }
+ }
+
+ status = sysfs_set_active_low(desc, dev, value);
+
+unlock:
+ mutex_unlock(&sysfs_lock);
+
+ if (status)
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(gpiod_sysfs_set_active_low);
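And a brief, equally hypothetical use of gpiod_sysfs_set_active_low() for an active-low input, so that the sysfs "value" (and any configured edge polling) reflects logical rather than physical state:

/* Button wired active-low: report '1' in sysfs when the line is low. */
static int foo_fix_button_polarity(struct gpio_desc *button)
{
	return gpiod_sysfs_set_active_low(button, 1);
}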
+
+/**
+ * gpiod_unexport - reverse effect of gpiod_export()
+ * @desc: GPIO descriptor to make unavailable
+ *
+ * This is implicit on gpio_free().
+ */
+void gpiod_unexport(struct gpio_desc *desc)
+{
+ int status = 0;
+ struct device *dev = NULL;
+
+ if (!desc) {
+ pr_warn("%s: invalid GPIO\n", __func__);
+ return;
+ }
+
+ mutex_lock(&sysfs_lock);
+
+ if (test_bit(FLAG_EXPORT, &desc->flags)) {
+
+ dev = class_find_device(&gpio_class, NULL, desc, match_export);
+ if (dev) {
+ gpio_setup_irq(desc, dev, 0);
+ clear_bit(FLAG_EXPORT, &desc->flags);
+ } else
+ status = -ENODEV;
+ }
+
+ mutex_unlock(&sysfs_lock);
+
+ if (dev) {
+ device_unregister(dev);
+ put_device(dev);
+ }
+
+ if (status)
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
+}
+EXPORT_SYMBOL_GPL(gpiod_unexport);
+
+int gpiochip_export(struct gpio_chip *chip)
+{
+ int status;
+ struct device *dev;
+
+ /* Many systems register gpio chips for SOC support very early,
+ * before driver model support is available. In those cases we
+ * export this later, in gpiolib_sysfs_init() ... here we just
+ * verify that _some_ field of gpio_class got initialized.
+ */
+ if (!gpio_class.p)
+ return 0;
+
+ /* use chip->base for the ID; it's already known to be unique */
+ mutex_lock(&sysfs_lock);
+ dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
+ "gpiochip%d", chip->base);
+ if (!IS_ERR(dev)) {
+ status = sysfs_create_group(&dev->kobj,
+ &gpiochip_attr_group);
+ } else
+ status = PTR_ERR(dev);
+ chip->exported = (status == 0);
+ mutex_unlock(&sysfs_lock);
+
+ if (status)
+ chip_dbg(chip, "%s: status %d\n", __func__, status);
+
+ return status;
+}
+
+void gpiochip_unexport(struct gpio_chip *chip)
+{
+ int status;
+ struct device *dev;
+
+ mutex_lock(&sysfs_lock);
+ dev = class_find_device(&gpio_class, NULL, chip, match_export);
+ if (dev) {
+ put_device(dev);
+ device_unregister(dev);
+ chip->exported = false;
+ status = 0;
+ } else
+ status = -ENODEV;
+ mutex_unlock(&sysfs_lock);
+
+ if (status)
+ chip_dbg(chip, "%s: status %d\n", __func__, status);
+}
+
+static int __init gpiolib_sysfs_init(void)
+{
+ int status;
+ unsigned long flags;
+ struct gpio_chip *chip;
+
+ status = class_register(&gpio_class);
+ if (status < 0)
+ return status;
+
+ /* Scan and register the gpio_chips which registered very
+ * early (e.g. before the class_register above was called).
+ *
+ * We run before arch_initcall() so chip->dev nodes can have been
+ * registered, and so arch_initcall() can always gpio_export().
+ */
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_for_each_entry(chip, &gpio_chips, list) {
+ if (chip->exported)
+ continue;
+
+ /*
+ * TODO we yield gpio_lock here because gpiochip_export()
+ * acquires a mutex. This is unsafe and needs to be fixed.
+ *
+ * Also it would be nice to use gpiochip_find() here so we
+ * can keep gpio_chips local to gpiolib.c, but the yield of
+ * gpio_lock prevents us from doing this.
+ */
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ status = gpiochip_export(chip);
+ spin_lock_irqsave(&gpio_lock, flags);
+ }
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
+
+ return status;
+}
+postcore_initcall(gpiolib_sysfs_init);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 2ebc9071e354..15cc0bb65dda 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
#include "gpiolib.h"
@@ -44,111 +45,19 @@
* While any GPIO is requested, its gpio_chip is not removable;
* each GPIO's "requested" flag serves as a lock and refcount.
*/
-static DEFINE_SPINLOCK(gpio_lock);
+DEFINE_SPINLOCK(gpio_lock);
-struct gpio_desc {
- struct gpio_chip *chip;
- unsigned long flags;
-/* flag symbols are bit numbers */
-#define FLAG_REQUESTED 0
-#define FLAG_IS_OUT 1
-#define FLAG_EXPORT 2 /* protected by sysfs_lock */
-#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
-#define FLAG_TRIG_FALL 4 /* trigger on falling edge */
-#define FLAG_TRIG_RISE 5 /* trigger on rising edge */
-#define FLAG_ACTIVE_LOW 6 /* value has active low */
-#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
-#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
-#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
-
-#define ID_SHIFT 16 /* add new flags before this one */
-
-#define GPIO_FLAGS_MASK ((1 << ID_SHIFT) - 1)
-#define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE))
-
-#ifdef CONFIG_DEBUG_FS
- const char *label;
-#endif
-};
static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
#define GPIO_OFFSET_VALID(chip, offset) (offset >= 0 && offset < chip->ngpio)
static DEFINE_MUTEX(gpio_lookup_lock);
static LIST_HEAD(gpio_lookup_list);
-static LIST_HEAD(gpio_chips);
-
-#ifdef CONFIG_GPIO_SYSFS
-static DEFINE_IDR(dirent_idr);
-#endif
-
-static int gpiod_request(struct gpio_desc *desc, const char *label);
-static void gpiod_free(struct gpio_desc *desc);
-
-/* With descriptor prefix */
-
-#ifdef CONFIG_DEBUG_FS
-#define gpiod_emerg(desc, fmt, ...) \
- pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
- ##__VA_ARGS__)
-#define gpiod_crit(desc, fmt, ...) \
- pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
- ##__VA_ARGS__)
-#define gpiod_err(desc, fmt, ...) \
- pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
- ##__VA_ARGS__)
-#define gpiod_warn(desc, fmt, ...) \
- pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
- ##__VA_ARGS__)
-#define gpiod_info(desc, fmt, ...) \
- pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
- ##__VA_ARGS__)
-#define gpiod_dbg(desc, fmt, ...) \
- pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
- ##__VA_ARGS__)
-#else
-#define gpiod_emerg(desc, fmt, ...) \
- pr_emerg("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_crit(desc, fmt, ...) \
- pr_crit("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_err(desc, fmt, ...) \
- pr_err("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_warn(desc, fmt, ...) \
- pr_warn("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_info(desc, fmt, ...) \
- pr_info("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#define gpiod_dbg(desc, fmt, ...) \
- pr_debug("gpio-%d: " fmt, desc_to_gpio(desc), ##__VA_ARGS__)
-#endif
-
-/* With chip prefix */
-
-#define chip_emerg(chip, fmt, ...) \
- pr_emerg("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
-#define chip_crit(chip, fmt, ...) \
- pr_crit("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
-#define chip_err(chip, fmt, ...) \
- pr_err("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
-#define chip_warn(chip, fmt, ...) \
- pr_warn("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
-#define chip_info(chip, fmt, ...) \
- pr_info("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
-#define chip_dbg(chip, fmt, ...) \
- pr_debug("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+LIST_HEAD(gpio_chips);
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
-#ifdef CONFIG_DEBUG_FS
d->label = label;
-#endif
-}
-
-/*
- * Return the GPIO number of the passed descriptor relative to its chip
- */
-static int gpio_chip_hwgpio(const struct gpio_desc *desc)
-{
- return desc - &desc->chip->desc[0];
}
/**
@@ -174,7 +83,6 @@ struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip,
return &chip->desc[hwnum];
}
-EXPORT_SYMBOL_GPL(gpiochip_get_desc);
/**
* Convert a GPIO descriptor to the integer namespace.
@@ -188,39 +96,6 @@ int desc_to_gpio(const struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(desc_to_gpio);
-/* Warn when drivers omit gpio_request() calls -- legal but ill-advised
- * when setting direction, and otherwise illegal. Until board setup code
- * and drivers use explicit requests everywhere (which won't happen when
- * those calls have no teeth) we can't avoid autorequesting. This nag
- * message should motivate switching to explicit requests... so should
- * the weaker cleanup after faults, compared to gpio_request().
- *
- * NOTE: the autorequest mechanism is going away; at this point it's
- * only "legal" in the sense that (old) code using it won't break yet,
- * but instead only triggers a WARN() stack dump.
- */
-static int gpio_ensure_requested(struct gpio_desc *desc)
-{
- const struct gpio_chip *chip = desc->chip;
- const int gpio = desc_to_gpio(desc);
-
- if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0,
- "autorequest GPIO-%d\n", gpio)) {
- if (!try_module_get(chip->owner)) {
- gpiod_err(desc, "%s: module can't be gotten\n",
- __func__);
- clear_bit(FLAG_REQUESTED, &desc->flags);
- /* lose */
- return -EIO;
- }
- desc_set_label(desc, "[auto]");
- /* caller must chip->request() w/o spinlock */
- if (chip->request)
- return 1;
- }
- return 0;
-}
-
/**
* gpiod_to_chip - Return the GPIO chip to which a GPIO descriptor belongs
* @desc: descriptor to return the chip of
@@ -291,836 +166,6 @@ int gpiod_get_direction(const struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(gpiod_get_direction);
-#ifdef CONFIG_GPIO_SYSFS
-
-/* lock protects against unexport_gpio() being called while
- * sysfs files are active.
- */
-static DEFINE_MUTEX(sysfs_lock);
-
-/*
- * /sys/class/gpio/gpioN... only for GPIOs that are exported
- * /direction
- * * MAY BE OMITTED if kernel won't allow direction changes
- * * is read/write as "in" or "out"
- * * may also be written as "high" or "low", initializing
- * output value as specified ("out" implies "low")
- * /value
- * * always readable, subject to hardware behavior
- * * may be writable, as zero/nonzero
- * /edge
- * * configures behavior of poll(2) on /value
- * * available only if pin can generate IRQs on input
- * * is read/write as "none", "falling", "rising", or "both"
- * /active_low
- * * configures polarity of /value
- * * is read/write as zero/nonzero
- * * also affects existing and subsequent "falling" and "rising"
- * /edge configuration
- */
-
-static ssize_t gpio_direction_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
-
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags)) {
- status = -EIO;
- } else {
- gpiod_get_direction(desc);
- status = sprintf(buf, "%s\n",
- test_bit(FLAG_IS_OUT, &desc->flags)
- ? "out" : "in");
- }
-
- mutex_unlock(&sysfs_lock);
- return status;
-}
-
-static ssize_t gpio_direction_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
-
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags))
- status = -EIO;
- else if (sysfs_streq(buf, "high"))
- status = gpiod_direction_output_raw(desc, 1);
- else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low"))
- status = gpiod_direction_output_raw(desc, 0);
- else if (sysfs_streq(buf, "in"))
- status = gpiod_direction_input(desc);
- else
- status = -EINVAL;
-
- mutex_unlock(&sysfs_lock);
- return status ? : size;
-}
-
-static /* const */ DEVICE_ATTR(direction, 0644,
- gpio_direction_show, gpio_direction_store);
-
-static ssize_t gpio_value_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
-
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags))
- status = -EIO;
- else
- status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc));
-
- mutex_unlock(&sysfs_lock);
- return status;
-}
-
-static ssize_t gpio_value_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
-
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags))
- status = -EIO;
- else if (!test_bit(FLAG_IS_OUT, &desc->flags))
- status = -EPERM;
- else {
- long value;
-
- status = kstrtol(buf, 0, &value);
- if (status == 0) {
- gpiod_set_value_cansleep(desc, value);
- status = size;
- }
- }
-
- mutex_unlock(&sysfs_lock);
- return status;
-}
-
-static const DEVICE_ATTR(value, 0644,
- gpio_value_show, gpio_value_store);
-
-static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
-{
- struct kernfs_node *value_sd = priv;
-
- sysfs_notify_dirent(value_sd);
- return IRQ_HANDLED;
-}
-
-static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
- unsigned long gpio_flags)
-{
- struct kernfs_node *value_sd;
- unsigned long irq_flags;
- int ret, irq, id;
-
- if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags)
- return 0;
-
- irq = gpiod_to_irq(desc);
- if (irq < 0)
- return -EIO;
-
- id = desc->flags >> ID_SHIFT;
- value_sd = idr_find(&dirent_idr, id);
- if (value_sd)
- free_irq(irq, value_sd);
-
- desc->flags &= ~GPIO_TRIGGER_MASK;
-
- if (!gpio_flags) {
- gpiod_unlock_as_irq(desc);
- ret = 0;
- goto free_id;
- }
-
- irq_flags = IRQF_SHARED;
- if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
- if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
-
- if (!value_sd) {
- value_sd = sysfs_get_dirent(dev->kobj.sd, "value");
- if (!value_sd) {
- ret = -ENODEV;
- goto err_out;
- }
-
- ret = idr_alloc(&dirent_idr, value_sd, 1, 0, GFP_KERNEL);
- if (ret < 0)
- goto free_sd;
- id = ret;
-
- desc->flags &= GPIO_FLAGS_MASK;
- desc->flags |= (unsigned long)id << ID_SHIFT;
-
- if (desc->flags >> ID_SHIFT != id) {
- ret = -ERANGE;
- goto free_id;
- }
- }
-
- ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags,
- "gpiolib", value_sd);
- if (ret < 0)
- goto free_id;
-
- ret = gpiod_lock_as_irq(desc);
- if (ret < 0) {
- gpiod_warn(desc, "failed to flag the GPIO for IRQ\n");
- goto free_id;
- }
-
- desc->flags |= gpio_flags;
- return 0;
-
-free_id:
- idr_remove(&dirent_idr, id);
- desc->flags &= GPIO_FLAGS_MASK;
-free_sd:
- if (value_sd)
- sysfs_put(value_sd);
-err_out:
- return ret;
-}
-
-static const struct {
- const char *name;
- unsigned long flags;
-} trigger_types[] = {
- { "none", 0 },
- { "falling", BIT(FLAG_TRIG_FALL) },
- { "rising", BIT(FLAG_TRIG_RISE) },
- { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) },
-};
-
-static ssize_t gpio_edge_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
-
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags))
- status = -EIO;
- else {
- int i;
-
- status = 0;
- for (i = 0; i < ARRAY_SIZE(trigger_types); i++)
- if ((desc->flags & GPIO_TRIGGER_MASK)
- == trigger_types[i].flags) {
- status = sprintf(buf, "%s\n",
- trigger_types[i].name);
- break;
- }
- }
-
- mutex_unlock(&sysfs_lock);
- return status;
-}
-
-static ssize_t gpio_edge_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(trigger_types); i++)
- if (sysfs_streq(trigger_types[i].name, buf))
- goto found;
- return -EINVAL;
-
-found:
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags))
- status = -EIO;
- else {
- status = gpio_setup_irq(desc, dev, trigger_types[i].flags);
- if (!status)
- status = size;
- }
-
- mutex_unlock(&sysfs_lock);
-
- return status;
-}
-
-static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
-
-static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
- int value)
-{
- int status = 0;
-
- if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
- return 0;
-
- if (value)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- else
- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
-
- /* reconfigure poll(2) support if enabled on one edge only */
- if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
- !!test_bit(FLAG_TRIG_FALL, &desc->flags))) {
- unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;
-
- gpio_setup_irq(desc, dev, 0);
- status = gpio_setup_irq(desc, dev, trigger_flags);
- }
-
- return status;
-}
-
-static ssize_t gpio_active_low_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
-
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags))
- status = -EIO;
- else
- status = sprintf(buf, "%d\n",
- !!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
-
- mutex_unlock(&sysfs_lock);
-
- return status;
-}
-
-static ssize_t gpio_active_low_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
-{
- struct gpio_desc *desc = dev_get_drvdata(dev);
- ssize_t status;
-
- mutex_lock(&sysfs_lock);
-
- if (!test_bit(FLAG_EXPORT, &desc->flags)) {
- status = -EIO;
- } else {
- long value;
-
- status = kstrtol(buf, 0, &value);
- if (status == 0)
- status = sysfs_set_active_low(desc, dev, value != 0);
- }
-
- mutex_unlock(&sysfs_lock);
-
- return status ? : size;
-}
-
-static const DEVICE_ATTR(active_low, 0644,
- gpio_active_low_show, gpio_active_low_store);
-
-static const struct attribute *gpio_attrs[] = {
- &dev_attr_value.attr,
- &dev_attr_active_low.attr,
- NULL,
-};
-
-static const struct attribute_group gpio_attr_group = {
- .attrs = (struct attribute **) gpio_attrs,
-};
-
-/*
- * /sys/class/gpio/gpiochipN/
- * /base ... matching gpio_chip.base (N)
- * /label ... matching gpio_chip.label
- * /ngpio ... matching gpio_chip.ngpio
- */
-
-static ssize_t chip_base_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct gpio_chip *chip = dev_get_drvdata(dev);
-
- return sprintf(buf, "%d\n", chip->base);
-}
-static DEVICE_ATTR(base, 0444, chip_base_show, NULL);
-
-static ssize_t chip_label_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct gpio_chip *chip = dev_get_drvdata(dev);
-
- return sprintf(buf, "%s\n", chip->label ? : "");
-}
-static DEVICE_ATTR(label, 0444, chip_label_show, NULL);
-
-static ssize_t chip_ngpio_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- const struct gpio_chip *chip = dev_get_drvdata(dev);
-
- return sprintf(buf, "%u\n", chip->ngpio);
-}
-static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
-
-static const struct attribute *gpiochip_attrs[] = {
- &dev_attr_base.attr,
- &dev_attr_label.attr,
- &dev_attr_ngpio.attr,
- NULL,
-};
-
-static const struct attribute_group gpiochip_attr_group = {
- .attrs = (struct attribute **) gpiochip_attrs,
-};
-
-/*
- * /sys/class/gpio/export ... write-only
- * integer N ... number of GPIO to export (full access)
- * /sys/class/gpio/unexport ... write-only
- * integer N ... number of GPIO to unexport
- */
-static ssize_t export_store(struct class *class,
- struct class_attribute *attr,
- const char *buf, size_t len)
-{
- long gpio;
- struct gpio_desc *desc;
- int status;
-
- status = kstrtol(buf, 0, &gpio);
- if (status < 0)
- goto done;
-
- desc = gpio_to_desc(gpio);
- /* reject invalid GPIOs */
- if (!desc) {
- pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
- return -EINVAL;
- }
-
- /* No extra locking here; FLAG_SYSFS just signifies that the
- * request and export were done by on behalf of userspace, so
- * they may be undone on its behalf too.
- */
-
- status = gpiod_request(desc, "sysfs");
- if (status < 0) {
- if (status == -EPROBE_DEFER)
- status = -ENODEV;
- goto done;
- }
- status = gpiod_export(desc, true);
- if (status < 0)
- gpiod_free(desc);
- else
- set_bit(FLAG_SYSFS, &desc->flags);
-
-done:
- if (status)
- pr_debug("%s: status %d\n", __func__, status);
- return status ? : len;
-}
-
-static ssize_t unexport_store(struct class *class,
- struct class_attribute *attr,
- const char *buf, size_t len)
-{
- long gpio;
- struct gpio_desc *desc;
- int status;
-
- status = kstrtol(buf, 0, &gpio);
- if (status < 0)
- goto done;
-
- desc = gpio_to_desc(gpio);
- /* reject bogus commands (gpio_unexport ignores them) */
- if (!desc) {
- pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
- return -EINVAL;
- }
-
- status = -EINVAL;
-
- /* No extra locking here; FLAG_SYSFS just signifies that the
- * request and export were done by on behalf of userspace, so
- * they may be undone on its behalf too.
- */
- if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) {
- status = 0;
- gpiod_free(desc);
- }
-done:
- if (status)
- pr_debug("%s: status %d\n", __func__, status);
- return status ? : len;
-}
-
-static struct class_attribute gpio_class_attrs[] = {
- __ATTR(export, 0200, NULL, export_store),
- __ATTR(unexport, 0200, NULL, unexport_store),
- __ATTR_NULL,
-};
-
-static struct class gpio_class = {
- .name = "gpio",
- .owner = THIS_MODULE,
-
- .class_attrs = gpio_class_attrs,
-};
-
-
-/**
- * gpiod_export - export a GPIO through sysfs
- * @gpio: gpio to make available, already requested
- * @direction_may_change: true if userspace may change gpio direction
- * Context: arch_initcall or later
- *
- * When drivers want to make a GPIO accessible to userspace after they
- * have requested it -- perhaps while debugging, or as part of their
- * public interface -- they may use this routine. If the GPIO can
- * change direction (some can't) and the caller allows it, userspace
- * will see "direction" sysfs attribute which may be used to change
- * the gpio's direction. A "value" attribute will always be provided.
- *
- * Returns zero on success, else an error.
- */
-int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
-{
- unsigned long flags;
- int status;
- const char *ioname = NULL;
- struct device *dev;
- int offset;
-
- /* can't export until sysfs is available ... */
- if (!gpio_class.p) {
- pr_debug("%s: called too early!\n", __func__);
- return -ENOENT;
- }
-
- if (!desc) {
- pr_debug("%s: invalid gpio descriptor\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&sysfs_lock);
-
- spin_lock_irqsave(&gpio_lock, flags);
- if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
- test_bit(FLAG_EXPORT, &desc->flags)) {
- spin_unlock_irqrestore(&gpio_lock, flags);
- gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n",
- __func__,
- test_bit(FLAG_REQUESTED, &desc->flags),
- test_bit(FLAG_EXPORT, &desc->flags));
- status = -EPERM;
- goto fail_unlock;
- }
-
- if (!desc->chip->direction_input || !desc->chip->direction_output)
- direction_may_change = false;
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- offset = gpio_chip_hwgpio(desc);
- if (desc->chip->names && desc->chip->names[offset])
- ioname = desc->chip->names[offset];
-
- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%u",
- desc_to_gpio(desc));
- if (IS_ERR(dev)) {
- status = PTR_ERR(dev);
- goto fail_unlock;
- }
-
- status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
- if (status)
- goto fail_unregister_device;
-
- if (direction_may_change) {
- status = device_create_file(dev, &dev_attr_direction);
- if (status)
- goto fail_unregister_device;
- }
-
- if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
- !test_bit(FLAG_IS_OUT, &desc->flags))) {
- status = device_create_file(dev, &dev_attr_edge);
- if (status)
- goto fail_unregister_device;
- }
-
- set_bit(FLAG_EXPORT, &desc->flags);
- mutex_unlock(&sysfs_lock);
- return 0;
-
-fail_unregister_device:
- device_unregister(dev);
-fail_unlock:
- mutex_unlock(&sysfs_lock);
- gpiod_dbg(desc, "%s: status %d\n", __func__, status);
- return status;
-}
-EXPORT_SYMBOL_GPL(gpiod_export);
-
-static int match_export(struct device *dev, const void *data)
-{
- return dev_get_drvdata(dev) == data;
-}
-
-/**
- * gpiod_export_link - create a sysfs link to an exported GPIO node
- * @dev: device under which to create symlink
- * @name: name of the symlink
- * @gpio: gpio to create symlink to, already exported
- *
- * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN
- * node. Caller is responsible for unlinking.
- *
- * Returns zero on success, else an error.
- */
-int gpiod_export_link(struct device *dev, const char *name,
- struct gpio_desc *desc)
-{
- int status = -EINVAL;
-
- if (!desc) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&sysfs_lock);
-
- if (test_bit(FLAG_EXPORT, &desc->flags)) {
- struct device *tdev;
-
- tdev = class_find_device(&gpio_class, NULL, desc, match_export);
- if (tdev != NULL) {
- status = sysfs_create_link(&dev->kobj, &tdev->kobj,
- name);
- } else {
- status = -ENODEV;
- }
- }
-
- mutex_unlock(&sysfs_lock);
-
- if (status)
- gpiod_dbg(desc, "%s: status %d\n", __func__, status);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(gpiod_export_link);
-
-/**
- * gpiod_sysfs_set_active_low - set the polarity of gpio sysfs value
- * @gpio: gpio to change
- * @value: non-zero to use active low, i.e. inverted values
- *
- * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute.
- * The GPIO does not have to be exported yet. If poll(2) support has
- * been enabled for either rising or falling edge, it will be
- * reconfigured to follow the new polarity.
- *
- * Returns zero on success, else an error.
- */
-int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value)
-{
- struct device *dev = NULL;
- int status = -EINVAL;
-
- if (!desc) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return -EINVAL;
- }
-
- mutex_lock(&sysfs_lock);
-
- if (test_bit(FLAG_EXPORT, &desc->flags)) {
- dev = class_find_device(&gpio_class, NULL, desc, match_export);
- if (dev == NULL) {
- status = -ENODEV;
- goto unlock;
- }
- }
-
- status = sysfs_set_active_low(desc, dev, value);
-
-unlock:
- mutex_unlock(&sysfs_lock);
-
- if (status)
- gpiod_dbg(desc, "%s: status %d\n", __func__, status);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(gpiod_sysfs_set_active_low);
-
-/**
- * gpiod_unexport - reverse effect of gpio_export()
- * @gpio: gpio to make unavailable
- *
- * This is implicit on gpio_free().
- */
-void gpiod_unexport(struct gpio_desc *desc)
-{
- int status = 0;
- struct device *dev = NULL;
-
- if (!desc) {
- pr_warn("%s: invalid GPIO\n", __func__);
- return;
- }
-
- mutex_lock(&sysfs_lock);
-
- if (test_bit(FLAG_EXPORT, &desc->flags)) {
-
- dev = class_find_device(&gpio_class, NULL, desc, match_export);
- if (dev) {
- gpio_setup_irq(desc, dev, 0);
- clear_bit(FLAG_EXPORT, &desc->flags);
- } else
- status = -ENODEV;
- }
-
- mutex_unlock(&sysfs_lock);
-
- if (dev) {
- device_unregister(dev);
- put_device(dev);
- }
-
- if (status)
- gpiod_dbg(desc, "%s: status %d\n", __func__, status);
-}
-EXPORT_SYMBOL_GPL(gpiod_unexport);
-
-static int gpiochip_export(struct gpio_chip *chip)
-{
- int status;
- struct device *dev;
-
- /* Many systems register gpio chips for SOC support very early,
- * before driver model support is available. In those cases we
- * export this later, in gpiolib_sysfs_init() ... here we just
- * verify that _some_ field of gpio_class got initialized.
- */
- if (!gpio_class.p)
- return 0;
-
- /* use chip->base for the ID; it's already known to be unique */
- mutex_lock(&sysfs_lock);
- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
- "gpiochip%d", chip->base);
- if (!IS_ERR(dev)) {
- status = sysfs_create_group(&dev->kobj,
- &gpiochip_attr_group);
- } else
- status = PTR_ERR(dev);
- chip->exported = (status == 0);
- mutex_unlock(&sysfs_lock);
-
- if (status) {
- unsigned long flags;
- unsigned gpio;
-
- spin_lock_irqsave(&gpio_lock, flags);
- gpio = 0;
- while (gpio < chip->ngpio)
- chip->desc[gpio++].chip = NULL;
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- chip_dbg(chip, "%s: status %d\n", __func__, status);
- }
-
- return status;
-}
-
-static void gpiochip_unexport(struct gpio_chip *chip)
-{
- int status;
- struct device *dev;
-
- mutex_lock(&sysfs_lock);
- dev = class_find_device(&gpio_class, NULL, chip, match_export);
- if (dev) {
- put_device(dev);
- device_unregister(dev);
- chip->exported = false;
- status = 0;
- } else
- status = -ENODEV;
- mutex_unlock(&sysfs_lock);
-
- if (status)
- chip_dbg(chip, "%s: status %d\n", __func__, status);
-}
-
-static int __init gpiolib_sysfs_init(void)
-{
- int status;
- unsigned long flags;
- struct gpio_chip *chip;
-
- status = class_register(&gpio_class);
- if (status < 0)
- return status;
-
- /* Scan and register the gpio_chips which registered very
- * early (e.g. before the class_register above was called).
- *
- * We run before arch_initcall() so chip->dev nodes can have
- * registered, and so arch_initcall() can always gpio_export().
- */
- spin_lock_irqsave(&gpio_lock, flags);
- list_for_each_entry(chip, &gpio_chips, list) {
- if (!chip || chip->exported)
- continue;
-
- spin_unlock_irqrestore(&gpio_lock, flags);
- status = gpiochip_export(chip);
- spin_lock_irqsave(&gpio_lock, flags);
- }
- spin_unlock_irqrestore(&gpio_lock, flags);
-
-
- return status;
-}
-postcore_initcall(gpiolib_sysfs_init);
-
-#else
-static inline int gpiochip_export(struct gpio_chip *chip)
-{
- return 0;
-}
-
-static inline void gpiochip_unexport(struct gpio_chip *chip)
-{
-}
-
-#endif /* CONFIG_GPIO_SYSFS */
-
/*
* Add a new chip to the global chips list, keeping the list of chips sorted
* by base order.
@@ -1474,6 +519,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
{
unsigned int offset;
+ acpi_gpiochip_free_interrupts(gpiochip);
+
/* Remove all IRQ mappings and delete the domain */
if (gpiochip->irqdomain) {
for (offset = 0; offset < gpiochip->ngpio; offset++)
@@ -1567,6 +614,8 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
gpiochip->irq_base = irq_base;
}
+ acpi_gpiochip_request_interrupts(gpiochip);
+
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_irqchip_add);
@@ -1740,7 +789,7 @@ done:
return status;
}
-static int gpiod_request(struct gpio_desc *desc, const char *label)
+int gpiod_request(struct gpio_desc *desc, const char *label)
{
int status = -EPROBE_DEFER;
struct gpio_chip *chip;
@@ -1767,12 +816,6 @@ done:
return status;
}
-int gpio_request(unsigned gpio, const char *label)
-{
- return gpiod_request(gpio_to_desc(gpio), label);
-}
-EXPORT_SYMBOL_GPL(gpio_request);
-
static bool __gpiod_free(struct gpio_desc *desc)
{
bool ret = false;
@@ -1805,7 +848,7 @@ static bool __gpiod_free(struct gpio_desc *desc)
return ret;
}
-static void gpiod_free(struct gpio_desc *desc)
+void gpiod_free(struct gpio_desc *desc)
{
if (desc && __gpiod_free(desc))
module_put(desc->chip->owner);
@@ -1813,101 +856,14 @@ static void gpiod_free(struct gpio_desc *desc)
WARN_ON(extra_checks);
}
-void gpio_free(unsigned gpio)
-{
- gpiod_free(gpio_to_desc(gpio));
-}
-EXPORT_SYMBOL_GPL(gpio_free);
-
-/**
- * gpio_request_one - request a single GPIO with initial configuration
- * @gpio: the GPIO number
- * @flags: GPIO configuration as specified by GPIOF_*
- * @label: a literal description string of this GPIO
- */
-int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
-{
- struct gpio_desc *desc;
- int err;
-
- desc = gpio_to_desc(gpio);
-
- err = gpiod_request(desc, label);
- if (err)
- return err;
-
- if (flags & GPIOF_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-
- if (flags & GPIOF_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-
- if (flags & GPIOF_DIR_IN)
- err = gpiod_direction_input(desc);
- else
- err = gpiod_direction_output_raw(desc,
- (flags & GPIOF_INIT_HIGH) ? 1 : 0);
-
- if (err)
- goto free_gpio;
-
- if (flags & GPIOF_EXPORT) {
- err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE);
- if (err)
- goto free_gpio;
- }
-
- return 0;
-
- free_gpio:
- gpiod_free(desc);
- return err;
-}
-EXPORT_SYMBOL_GPL(gpio_request_one);
-
-/**
- * gpio_request_array - request multiple GPIOs in a single call
- * @array: array of the 'struct gpio'
- * @num: how many GPIOs in the array
- */
-int gpio_request_array(const struct gpio *array, size_t num)
-{
- int i, err;
-
- for (i = 0; i < num; i++, array++) {
- err = gpio_request_one(array->gpio, array->flags, array->label);
- if (err)
- goto err_free;
- }
- return 0;
-
-err_free:
- while (i--)
- gpio_free((--array)->gpio);
- return err;
-}
-EXPORT_SYMBOL_GPL(gpio_request_array);
-
-/**
- * gpio_free_array - release multiple GPIOs in a single call
- * @array: array of the 'struct gpio'
- * @num: how many GPIOs in the array
- */
-void gpio_free_array(const struct gpio *array, size_t num)
-{
- while (num--)
- gpio_free((array++)->gpio);
-}
-EXPORT_SYMBOL_GPL(gpio_free_array);
-
/**
* gpiochip_is_requested - return string iff signal was requested
* @chip: controller managing the signal
* @offset: of signal within controller's 0..(ngpio - 1) range
*
* Returns NULL if the GPIO is not currently requested, else a string.
- * If debugfs support is enabled, the string returned is the label passed
- * to gpio_request(); otherwise it is a meaningless constant.
+ * The string returned is the label passed to gpio_request(); if none has been
+ * passed it is a meaningless, non-NULL constant.
*
* This function is for use by GPIO controller drivers. The label can
* help with diagnostics, and knowing that the signal is used as a GPIO
@@ -1924,11 +880,7 @@ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
if (test_bit(FLAG_REQUESTED, &desc->flags) == 0)
return NULL;
-#ifdef CONFIG_DEBUG_FS
return desc->label;
-#else
- return "?";
-#endif
}
EXPORT_SYMBOL_GPL(gpiochip_is_requested);
@@ -1950,6 +902,7 @@ int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label)
return __gpiod_request(desc, label);
}
+EXPORT_SYMBOL_GPL(gpiochip_request_own_desc);
/**
* gpiochip_free_own_desc - Free GPIO requested by the chip driver
@@ -1963,6 +916,7 @@ void gpiochip_free_own_desc(struct gpio_desc *desc)
if (desc)
__gpiod_free(desc);
}
+EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
/* Drivers MUST set GPIO direction before making get/set calls. In
* some cases this is done in early boot, before IRQs are enabled.
@@ -1984,10 +938,8 @@ void gpiochip_free_own_desc(struct gpio_desc *desc)
*/
int gpiod_direction_input(struct gpio_desc *desc)
{
- unsigned long flags;
struct gpio_chip *chip;
int status = -EINVAL;
- int offset;
if (!desc || !desc->chip) {
pr_warn("%s: invalid GPIO\n", __func__);
@@ -2002,52 +954,20 @@ int gpiod_direction_input(struct gpio_desc *desc)
return -EIO;
}
- spin_lock_irqsave(&gpio_lock, flags);
-
- status = gpio_ensure_requested(desc);
- if (status < 0)
- goto fail;
-
- /* now we know the gpio is valid and chip won't vanish */
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- might_sleep_if(chip->can_sleep);
-
- offset = gpio_chip_hwgpio(desc);
- if (status) {
- status = chip->request(chip, offset);
- if (status < 0) {
- gpiod_dbg(desc, "%s: chip request fail, %d\n",
- __func__, status);
- /* and it's not available to anyone else ...
- * gpio_request() is the fully clean solution.
- */
- goto lose;
- }
- }
-
- status = chip->direction_input(chip, offset);
+ status = chip->direction_input(chip, gpio_chip_hwgpio(desc));
if (status == 0)
clear_bit(FLAG_IS_OUT, &desc->flags);
trace_gpio_direction(desc_to_gpio(desc), 1, status);
-lose:
- return status;
-fail:
- spin_unlock_irqrestore(&gpio_lock, flags);
- if (status)
- gpiod_dbg(desc, "%s: status %d\n", __func__, status);
+
return status;
}
EXPORT_SYMBOL_GPL(gpiod_direction_input);
static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
- unsigned long flags;
struct gpio_chip *chip;
int status = -EINVAL;
- int offset;
/* GPIOs used for IRQs shall not be set as output */
if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) {
@@ -2073,42 +993,11 @@ static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value)
return -EIO;
}
- spin_lock_irqsave(&gpio_lock, flags);
-
- status = gpio_ensure_requested(desc);
- if (status < 0)
- goto fail;
-
- /* now we know the gpio is valid and chip won't vanish */
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- might_sleep_if(chip->can_sleep);
-
- offset = gpio_chip_hwgpio(desc);
- if (status) {
- status = chip->request(chip, offset);
- if (status < 0) {
- gpiod_dbg(desc, "%s: chip request fail, %d\n",
- __func__, status);
- /* and it's not available to anyone else ...
- * gpio_request() is the fully clean solution.
- */
- goto lose;
- }
- }
-
- status = chip->direction_output(chip, offset, value);
+ status = chip->direction_output(chip, gpio_chip_hwgpio(desc), value);
if (status == 0)
set_bit(FLAG_IS_OUT, &desc->flags);
trace_gpio_value(desc_to_gpio(desc), 0, value);
trace_gpio_direction(desc_to_gpio(desc), 0, status);
-lose:
- return status;
-fail:
- spin_unlock_irqrestore(&gpio_lock, flags);
- if (status)
- gpiod_dbg(desc, "%s: gpio status %d\n", __func__, status);
return status;
}
@@ -2167,10 +1056,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output);
*/
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
- unsigned long flags;
struct gpio_chip *chip;
- int status = -EINVAL;
- int offset;
if (!desc || !desc->chip) {
pr_warn("%s: invalid GPIO\n", __func__);
@@ -2185,27 +1071,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
return -ENOTSUPP;
}
- spin_lock_irqsave(&gpio_lock, flags);
-
- status = gpio_ensure_requested(desc);
- if (status < 0)
- goto fail;
-
- /* now we know the gpio is valid and chip won't vanish */
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- might_sleep_if(chip->can_sleep);
-
- offset = gpio_chip_hwgpio(desc);
- return chip->set_debounce(chip, offset, debounce);
-
-fail:
- spin_unlock_irqrestore(&gpio_lock, flags);
- if (status)
- gpiod_dbg(desc, "%s: status %d\n", __func__, status);
-
- return status;
+ return chip->set_debounce(chip, gpio_chip_hwgpio(desc), debounce);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -2448,54 +1314,44 @@ int gpiod_to_irq(const struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_to_irq);
/**
- * gpiod_lock_as_irq() - lock a GPIO to be used as IRQ
- * @gpio: the GPIO line to lock as used for IRQ
+ * gpio_lock_as_irq() - lock a GPIO to be used as IRQ
+ * @chip: the chip the GPIO to lock belongs to
+ * @offset: the offset of the GPIO to lock as IRQ
*
* This is used directly by GPIO drivers that want to lock down
* a certain GPIO line to be used for IRQs.
*/
-int gpiod_lock_as_irq(struct gpio_desc *desc)
+int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- if (!desc)
+ if (offset >= chip->ngpio)
return -EINVAL;
- if (test_bit(FLAG_IS_OUT, &desc->flags)) {
- gpiod_err(desc,
+ if (test_bit(FLAG_IS_OUT, &chip->desc[offset].flags)) {
+ chip_err(chip,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &desc->flags);
+ set_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
return 0;
}
-EXPORT_SYMBOL_GPL(gpiod_lock_as_irq);
-
-int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
-{
- return gpiod_lock_as_irq(gpiochip_get_desc(chip, offset));
-}
EXPORT_SYMBOL_GPL(gpio_lock_as_irq);
/**
- * gpiod_unlock_as_irq() - unlock a GPIO used as IRQ
- * @gpio: the GPIO line to unlock from IRQ usage
+ * gpio_unlock_as_irq() - unlock a GPIO used as IRQ
+ * @chip: the chip the GPIO to unlock belongs to
+ * @offset: the offset of the GPIO to unlock from IRQ usage
*
* This is used directly by GPIO drivers that want to indicate
* that a certain GPIO is no longer used exclusively for IRQ.
*/
-void gpiod_unlock_as_irq(struct gpio_desc *desc)
+void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- if (!desc)
+ if (offset >= chip->ngpio)
return;
- clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
-}
-EXPORT_SYMBOL_GPL(gpiod_unlock_as_irq);
-
-void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset)
-{
- return gpiod_unlock_as_irq(gpiochip_get_desc(chip, offset));
+ clear_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags);
}
EXPORT_SYMBOL_GPL(gpio_unlock_as_irq);
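A hedged sketch of how a gpio_chip driver might call the chip/offset based gpio_lock_as_irq()/gpio_unlock_as_irq() from its irqchip callbacks; struct foo_bank, the callback names and the use of d->hwirq as the GPIO offset are illustrative assumptions:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

struct foo_bank {			/* hypothetical per-bank state */
	struct gpio_chip chip;
};

static unsigned int foo_gpio_irq_startup(struct irq_data *d)
{
	struct foo_bank *bank = irq_data_get_irq_chip_data(d);

	/* Marks the line as used by an IRQ; fails if it is an output */
	if (gpio_lock_as_irq(&bank->chip, d->hwirq))
		dev_err(bank->chip.dev, "GPIO %lu unavailable as IRQ\n",
			d->hwirq);
	return 0;
}

static void foo_gpio_irq_shutdown(struct irq_data *d)
{
	struct foo_bank *bank = irq_data_get_irq_chip_data(d);

	gpio_unlock_as_irq(&bank->chip, d->hwirq);
}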
@@ -2726,38 +1582,43 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
* gpiod_get - obtain a GPIO for a given GPIO function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
+ * @flags: optional GPIO initialization flags
*
* Return the GPIO descriptor corresponding to the function con_id of device
* dev, -ENOENT if no GPIO has been assigned to the requested function, or
 * another IS_ERR() code if an error occurred while trying to acquire the GPIO.
*/
-struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id)
+struct gpio_desc *__must_check __gpiod_get(struct device *dev, const char *con_id,
+ enum gpiod_flags flags)
{
- return gpiod_get_index(dev, con_id, 0);
+ return gpiod_get_index(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL_GPL(gpiod_get);
+EXPORT_SYMBOL_GPL(__gpiod_get);
/**
* gpiod_get_optional - obtain an optional GPIO for a given GPIO function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
+ * @flags: optional GPIO initialization flags
*
* This is equivalent to gpiod_get(), except that when no GPIO was assigned to
* the requested function it will return NULL. This is convenient for drivers
* that need to handle optional GPIOs.
*/
-struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
- const char *con_id)
+struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev,
+ const char *con_id,
+ enum gpiod_flags flags)
{
- return gpiod_get_index_optional(dev, con_id, 0);
+ return gpiod_get_index_optional(dev, con_id, 0, flags);
}
-EXPORT_SYMBOL_GPL(gpiod_get_optional);
+EXPORT_SYMBOL_GPL(__gpiod_get_optional);
/**
* gpiod_get_index - obtain a GPIO from a multi-index GPIO function
* @dev: GPIO consumer, can be NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
* @idx: index of the GPIO to obtain in the consumer
+ * @flags: optional GPIO initialization flags
*
 * This variant of gpiod_get() allows access to GPIOs other than the first
* defined one for functions that define several GPIOs.
@@ -2766,23 +1627,24 @@ EXPORT_SYMBOL_GPL(gpiod_get_optional);
* requested function and/or index, or another IS_ERR() code if an error
 * occurred while trying to acquire the GPIO.
*/
-struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
+struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
const char *con_id,
- unsigned int idx)
+ unsigned int idx,
+ enum gpiod_flags flags)
{
struct gpio_desc *desc = NULL;
int status;
- enum gpio_lookup_flags flags = 0;
+ enum gpio_lookup_flags lookupflags = 0;
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
/* Using device tree? */
if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) {
dev_dbg(dev, "using device tree for GPIO lookup\n");
- desc = of_find_gpio(dev, con_id, idx, &flags);
+ desc = of_find_gpio(dev, con_id, idx, &lookupflags);
} else if (IS_ENABLED(CONFIG_ACPI) && dev && ACPI_HANDLE(dev)) {
dev_dbg(dev, "using ACPI for GPIO lookup\n");
- desc = acpi_find_gpio(dev, con_id, idx, &flags);
+ desc = acpi_find_gpio(dev, con_id, idx, &lookupflags);
}
/*
@@ -2791,7 +1653,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
*/
if (!desc || desc == ERR_PTR(-ENOENT)) {
dev_dbg(dev, "using lookup tables for GPIO lookup");
- desc = gpiod_find(dev, con_id, idx, &flags);
+ desc = gpiod_find(dev, con_id, idx, &lookupflags);
}
if (IS_ERR(desc)) {
@@ -2804,16 +1666,33 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
if (status < 0)
return ERR_PTR(status);
- if (flags & GPIO_ACTIVE_LOW)
+ if (lookupflags & GPIO_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (flags & GPIO_OPEN_DRAIN)
+ if (lookupflags & GPIO_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (flags & GPIO_OPEN_SOURCE)
+ if (lookupflags & GPIO_OPEN_SOURCE)
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ /* No particular flag request, return here... */
+ if (flags & GPIOD_FLAGS_BIT_DIR_SET)
+ return desc;
+
+ /* Process flags */
+ if (flags & GPIOD_FLAGS_BIT_DIR_OUT)
+ status = gpiod_direction_output(desc,
+ flags & GPIOD_FLAGS_BIT_DIR_VAL);
+ else
+ status = gpiod_direction_input(desc);
+
+ if (status < 0) {
+ dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
+ gpiod_put(desc);
+ return ERR_PTR(status);
+ }
+
return desc;
}
-EXPORT_SYMBOL_GPL(gpiod_get_index);
+EXPORT_SYMBOL_GPL(__gpiod_get_index);
/**
* gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO
@@ -2821,18 +1700,20 @@ EXPORT_SYMBOL_GPL(gpiod_get_index);
* @dev: GPIO consumer, can be NULL for system-global GPIOs
* @con_id: function within the GPIO consumer
* @index: index of the GPIO to obtain in the consumer
+ * @flags: optional GPIO initialization flags
*
* This is equivalent to gpiod_get_index(), except that when no GPIO with the
* specified index was assigned to the requested function it will return NULL.
* This is convenient for drivers that need to handle optional GPIOs.
*/
-struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
+struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev,
const char *con_id,
- unsigned int index)
+ unsigned int index,
+ enum gpiod_flags flags)
{
struct gpio_desc *desc;
- desc = gpiod_get_index(dev, con_id, index);
+ desc = gpiod_get_index(dev, con_id, index, flags);
if (IS_ERR(desc)) {
if (PTR_ERR(desc) == -ENOENT)
return NULL;
@@ -2840,7 +1721,7 @@ struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
return desc;
}
-EXPORT_SYMBOL_GPL(gpiod_get_index_optional);
+EXPORT_SYMBOL_GPL(__gpiod_get_index_optional);
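A hedged consumer-side sketch of the flags handling added above; the "reset" function name, the surrounding device and the timing are assumptions, and gpiod_get() is taken to be the wrapper that ends up in __gpiod_get() with a gpiod_flags value:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_pulse_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* Look up the "reset" GPIO and drive it low as part of the request,
	 * instead of a separate gpiod_direction_output() call.
	 */
	reset = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value_cansleep(reset, 1);	/* assert reset */
	usleep_range(1000, 2000);
	gpiod_set_value_cansleep(reset, 0);	/* release */

	gpiod_put(reset);
	return 0;
}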
/**
* gpiod_put - dispose of a GPIO descriptor
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 1a4103dd38df..9db2b6a71c5d 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -31,12 +31,21 @@ struct acpi_gpio_info {
void acpi_gpiochip_add(struct gpio_chip *chip);
void acpi_gpiochip_remove(struct gpio_chip *chip);
+void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
+void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
+
struct gpio_desc *acpi_get_gpiod_by_index(struct device *dev, int index,
struct acpi_gpio_info *info);
#else
static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
+static inline void
+acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
+
+static inline void
+acpi_gpiochip_free_interrupts(struct gpio_chip *chip) { }
+
static inline struct gpio_desc *
acpi_get_gpiod_by_index(struct device *dev, int index,
struct acpi_gpio_info *info)
@@ -45,10 +54,100 @@ acpi_get_gpiod_by_index(struct device *dev, int index,
}
#endif
-int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label);
-void gpiochip_free_own_desc(struct gpio_desc *desc);
-
struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np,
const char *list_name, int index, enum of_gpio_flags *flags);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, u16 hwnum);
+
+extern struct spinlock gpio_lock;
+extern struct list_head gpio_chips;
+
+struct gpio_desc {
+ struct gpio_chip *chip;
+ unsigned long flags;
+/* flag symbols are bit numbers */
+#define FLAG_REQUESTED 0
+#define FLAG_IS_OUT 1
+#define FLAG_EXPORT 2 /* protected by sysfs_lock */
+#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
+#define FLAG_TRIG_FALL 4 /* trigger on falling edge */
+#define FLAG_TRIG_RISE 5 /* trigger on rising edge */
+#define FLAG_ACTIVE_LOW 6 /* value has active low */
+#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
+#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
+#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+
+#define ID_SHIFT 16 /* add new flags before this one */
+
+#define GPIO_FLAGS_MASK ((1 << ID_SHIFT) - 1)
+#define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE))
+
+ const char *label;
+};
+
+int gpiod_request(struct gpio_desc *desc, const char *label);
+void gpiod_free(struct gpio_desc *desc);
+
+/*
+ * Return the GPIO number of the passed descriptor relative to its chip
+ */
+static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc)
+{
+ return desc - &desc->chip->desc[0];
+}
+
+/* With descriptor prefix */
+
+#define gpiod_emerg(desc, fmt, ...) \
+ pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
+ ##__VA_ARGS__)
+#define gpiod_crit(desc, fmt, ...) \
+ pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
+ ##__VA_ARGS__)
+#define gpiod_err(desc, fmt, ...) \
+ pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
+ ##__VA_ARGS__)
+#define gpiod_warn(desc, fmt, ...) \
+ pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
+ ##__VA_ARGS__)
+#define gpiod_info(desc, fmt, ...) \
+ pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
+ ##__VA_ARGS__)
+#define gpiod_dbg(desc, fmt, ...) \
+ pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
+ ##__VA_ARGS__)
+
+/* With chip prefix */
+
+#define chip_emerg(chip, fmt, ...) \
+ pr_emerg("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_crit(chip, fmt, ...) \
+ pr_crit("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_err(chip, fmt, ...) \
+ pr_err("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_warn(chip, fmt, ...) \
+ pr_warn("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_info(chip, fmt, ...) \
+ pr_info("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+#define chip_dbg(chip, fmt, ...) \
+ pr_debug("GPIO chip %s: " fmt, chip->label, ##__VA_ARGS__)
+
+#ifdef CONFIG_GPIO_SYSFS
+
+int gpiochip_export(struct gpio_chip *chip);
+void gpiochip_unexport(struct gpio_chip *chip);
+
+#else
+
+static inline int gpiochip_export(struct gpio_chip *chip)
+{
+ return 0;
+}
+
+static inline void gpiochip_unexport(struct gpio_chip *chip)
+{
+}
+
+#endif /* CONFIG_GPIO_SYSFS */
+
#endif /* GPIOLIB_H */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f5120046ff80..b066bb3ca01a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -114,6 +114,7 @@ config DRM_RADEON
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
+ select INTERVAL_TREE
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -201,3 +202,5 @@ source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
+
+source "drivers/gpu/drm/sti/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index dd2ba4269740..4a55d59ccd22 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -6,8 +6,8 @@ ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
- drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
+ drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+ drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
@@ -20,11 +20,12 @@ drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
+drm-$(CONFIG_OF) += drm_of.o
drm-usb-y := drm_usb.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
- drm_plane_helper.o
+ drm_plane_helper.o drm_dp_mst_topology.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
+obj-$(CONFIG_DRM_STI) += sti/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 59948eff6095..ad3d2ebf95c9 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -15,20 +15,19 @@
#include "armada_drm.h"
#include "armada_hw.h"
-static int armada510_init(struct armada_private *priv, struct device *dev)
+static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
{
- priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+ struct clk *clk;
- if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
- priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+ clk = devm_clk_get(dev, "ext_ref_clk1");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk) == -ENOENT ? -EPROBE_DEFER : PTR_ERR(clk);
- return PTR_RET(priv->extclk[0]);
-}
+ dcrtc->extclk[0] = clk;
-static int armada510_crtc_init(struct armada_crtc *dcrtc)
-{
 /* Lower the watermark so as to eliminate jitter at higher bandwidths */
armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+
return 0;
}
@@ -45,8 +44,7 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc)
static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
const struct drm_display_mode *mode, uint32_t *sclk)
{
- struct armada_private *priv = dcrtc->crtc.dev->dev_private;
- struct clk *clk = priv->extclk[0];
+ struct clk *clk = dcrtc->extclk[0];
int ret;
if (dcrtc->num == 1)
@@ -81,7 +79,6 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
const struct armada_variant armada510_ops = {
.has_spu_adv_reg = true,
.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
- .init = armada510_init,
- .crtc_init = armada510_crtc_init,
- .crtc_compute_clock = armada510_crtc_compute_clock,
+ .init = armada510_crtc_init,
+ .compute_clock = armada510_crtc_compute_clock,
};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3aedf9e993e6..9a0cc09e6653 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -7,6 +7,9 @@
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
@@ -332,24 +335,23 @@ static void armada_drm_crtc_commit(struct drm_crtc *crtc)
static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode, struct drm_display_mode *adj)
{
- struct armada_private *priv = crtc->dev->dev_private;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
int ret;
/* We can't do interlaced modes if we don't have the SPU_ADV_REG */
- if (!priv->variant->has_spu_adv_reg &&
+ if (!dcrtc->variant->has_spu_adv_reg &&
adj->flags & DRM_MODE_FLAG_INTERLACE)
return false;
/* Check whether the display mode is possible */
- ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+ ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL);
if (ret)
return false;
return true;
}
-void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
struct armada_vbl_event *e, *n;
void __iomem *base = dcrtc->base;
@@ -410,6 +412,27 @@ void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
}
}
+static irqreturn_t armada_drm_irq(int irq, void *arg)
+{
+ struct armada_crtc *dcrtc = arg;
+ u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /*
+	 * This is ridiculous - rather than writing bits to clear, we
+ * have to set the actual status register value. This is racy.
+ */
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /* Mask out those interrupts we haven't enabled */
+ v = stat & dcrtc->irq_ena;
+
+ if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+ armada_drm_crtc_irq(dcrtc, stat);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
/* These are locked by dev->vbl_lock */
void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
{
@@ -470,7 +493,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode, struct drm_display_mode *adj,
int x, int y, struct drm_framebuffer *old_fb)
{
- struct armada_private *priv = crtc->dev->dev_private;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_regs regs[17];
uint32_t lm, rm, tm, bm, val, sclk;
@@ -515,7 +537,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
}
/* Now compute the divider for real */
- priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+ dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
/* Ensure graphic fifo is enabled */
armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
@@ -537,7 +559,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
dcrtc->v[1].spu_v_porch = tm << 16 | bm;
val = adj->crtc_hsync_start;
dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- priv->variant->spu_adv_reg;
+ dcrtc->variant->spu_adv_reg;
if (interlaced) {
/* Odd interlaced frame */
@@ -546,7 +568,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- priv->variant->spu_adv_reg;
+ dcrtc->variant->spu_adv_reg;
} else {
dcrtc->v[0] = dcrtc->v[1];
}
@@ -561,7 +583,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
LCD_SPUT_V_H_TOTAL);
- if (priv->variant->has_spu_adv_reg) {
+ if (dcrtc->variant->has_spu_adv_reg) {
armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
@@ -805,12 +827,11 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_private *priv = crtc->dev->dev_private;
struct armada_gem_object *obj = NULL;
int ret;
/* If no cursor support, replicate drm's return value */
- if (!priv->variant->has_spu_adv_reg)
+ if (!dcrtc->variant->has_spu_adv_reg)
return -ENXIO;
if (handle && w > 0 && h > 0) {
@@ -858,11 +879,10 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct drm_device *dev = crtc->dev;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_private *priv = crtc->dev->dev_private;
int ret;
/* If no cursor support, replicate drm's return value */
- if (!priv->variant->has_spu_adv_reg)
+ if (!dcrtc->variant->has_spu_adv_reg)
return -EFAULT;
mutex_lock(&dev->struct_mutex);
@@ -888,6 +908,10 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
if (!IS_ERR(dcrtc->clk))
clk_disable_unprepare(dcrtc->clk);
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+
+ of_node_put(dcrtc->crtc.port);
+
kfree(dcrtc);
}
@@ -1027,19 +1051,20 @@ static int armada_drm_crtc_create_properties(struct drm_device *dev)
return 0;
}
-int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
- struct resource *res)
+int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
+ struct resource *res, int irq, const struct armada_variant *variant,
+ struct device_node *port)
{
- struct armada_private *priv = dev->dev_private;
+ struct armada_private *priv = drm->dev_private;
struct armada_crtc *dcrtc;
void __iomem *base;
int ret;
- ret = armada_drm_crtc_create_properties(dev);
+ ret = armada_drm_crtc_create_properties(drm);
if (ret)
return ret;
- base = devm_ioremap_resource(dev->dev, res);
+ base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1049,8 +1074,12 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
return -ENOMEM;
}
+ if (dev != drm->dev)
+ dev_set_drvdata(dev, dcrtc);
+
+ dcrtc->variant = variant;
dcrtc->base = base;
- dcrtc->num = num;
+ dcrtc->num = drm->mode_config.num_crtc;
dcrtc->clk = ERR_PTR(-EINVAL);
dcrtc->csc_yuv_mode = CSC_AUTO;
dcrtc->csc_rgb_mode = CSC_AUTO;
@@ -1072,9 +1101,18 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
+ dcrtc);
+ if (ret < 0) {
+ kfree(dcrtc);
+ return ret;
+ }
- if (priv->variant->crtc_init) {
- ret = priv->variant->crtc_init(dcrtc);
+ if (dcrtc->variant->init) {
+ ret = dcrtc->variant->init(dcrtc, dev);
if (ret) {
kfree(dcrtc);
return ret;
@@ -1086,7 +1124,8 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
priv->dcrtc[dcrtc->num] = dcrtc;
- drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+ dcrtc->crtc.port = port;
+ drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs);
drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
@@ -1094,5 +1133,107 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
dcrtc->csc_rgb_mode);
- return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+ return armada_overlay_plane_create(drm, 1 << dcrtc->num);
+}
+
+static int
+armada_lcd_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+ const struct armada_variant *variant;
+ struct device_node *port = NULL;
+
+ if (irq < 0)
+ return irq;
+
+ if (!dev->of_node) {
+ const struct platform_device_id *id;
+
+ id = platform_get_device_id(pdev);
+ if (!id)
+ return -ENXIO;
+
+ variant = (const struct armada_variant *)id->driver_data;
+ } else {
+ const struct of_device_id *match;
+ struct device_node *np, *parent = dev->of_node;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match)
+ return -ENXIO;
+
+ np = of_get_child_by_name(parent, "ports");
+ if (np)
+ parent = np;
+ port = of_get_child_by_name(parent, "port");
+ of_node_put(np);
+ if (!port) {
+ dev_err(dev, "no port node found in %s\n",
+ parent->full_name);
+ return -ENXIO;
+ }
+
+ variant = match->data;
+ }
+
+ return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
+}
+
+static void
+armada_lcd_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct armada_crtc *dcrtc = dev_get_drvdata(dev);
+
+ armada_drm_crtc_destroy(&dcrtc->crtc);
}
+
+static const struct component_ops armada_lcd_ops = {
+ .bind = armada_lcd_bind,
+ .unbind = armada_lcd_unbind,
+};
+
+static int armada_lcd_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &armada_lcd_ops);
+}
+
+static int armada_lcd_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &armada_lcd_ops);
+ return 0;
+}
+
+static struct of_device_id armada_lcd_of_match[] = {
+ {
+ .compatible = "marvell,dove-lcd",
+ .data = &armada510_ops,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, armada_lcd_of_match);
+
+static const struct platform_device_id armada_lcd_platform_ids[] = {
+ {
+ .name = "armada-lcd",
+ .driver_data = (unsigned long)&armada510_ops,
+ }, {
+ .name = "armada-510-lcd",
+ .driver_data = (unsigned long)&armada510_ops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
+
+struct platform_driver armada_lcd_platform_driver = {
+ .probe = armada_lcd_probe,
+ .remove = armada_lcd_remove,
+ .driver = {
+ .name = "armada-lcd",
+ .owner = THIS_MODULE,
+ .of_match_table = armada_lcd_of_match,
+ },
+ .id_table = armada_lcd_platform_ids,
+};
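
For reference, the child-side pattern that the armada_lcd driver adopts above boils down to the sketch below (the example_* names are hypothetical, not part of this patch): probe() only registers a component with the core, and the real setup is deferred to bind(), which runs once the master has collected all of the pieces it asked for.

    #include <linux/component.h>
    #include <linux/device.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    /* Called by the component core when the master binds all components. */
    static int example_bind(struct device *dev, struct device *master,
                            void *data)
    {
            /* 'data' is whatever the master passed to component_bind_all() */
            dev_info(dev, "bound to master %s\n", dev_name(master));
            return 0;
    }

    static void example_unbind(struct device *dev, struct device *master,
                               void *data)
    {
            dev_info(dev, "unbound\n");
    }

    static const struct component_ops example_ops = {
            .bind   = example_bind,
            .unbind = example_unbind,
    };

    static int example_probe(struct platform_device *pdev)
    {
            /* No hardware setup here; just announce ourselves to the core. */
            return component_add(&pdev->dev, &example_ops);
    }

    static int example_remove(struct platform_device *pdev)
    {
            component_del(&pdev->dev, &example_ops);
            return 0;
    }
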
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 9c10a07e7492..98102a5a9af5 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -32,12 +32,15 @@ struct armada_regs {
armada_reg_queue_mod(_r, _i, 0, 0, ~0)
struct armada_frame_work;
+struct armada_variant;
struct armada_crtc {
struct drm_crtc crtc;
+ const struct armada_variant *variant;
unsigned num;
void __iomem *base;
struct clk *clk;
+ struct clk *extclk[2];
struct {
uint32_t spu_v_h_total;
uint32_t spu_v_porch;
@@ -72,12 +75,16 @@ struct armada_crtc {
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
-int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+struct device_node;
+int armada_drm_crtc_create(struct drm_device *, struct device *,
+ struct resource *, int, const struct armada_variant *,
+ struct device_node *);
void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
-void armada_drm_crtc_irq(struct armada_crtc *, u32);
void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+extern struct platform_driver armada_lcd_platform_driver;
+
#endif
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index a72cae03b99b..ea63c6c7c66f 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -59,26 +59,23 @@ void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
struct armada_private;
struct armada_variant {
- bool has_spu_adv_reg;
+ bool has_spu_adv_reg;
uint32_t spu_adv_reg;
- int (*init)(struct armada_private *, struct device *);
- int (*crtc_init)(struct armada_crtc *);
- int (*crtc_compute_clock)(struct armada_crtc *,
- const struct drm_display_mode *,
- uint32_t *);
+ int (*init)(struct armada_crtc *, struct device *);
+ int (*compute_clock)(struct armada_crtc *,
+ const struct drm_display_mode *,
+ uint32_t *);
};
/* Variant ops */
extern const struct armada_variant armada510_ops;
struct armada_private {
- const struct armada_variant *variant;
struct work_struct fb_unref_work;
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
struct armada_crtc *dcrtc[2];
struct drm_mm linear;
- struct clk *extclk[2];
struct drm_property *csc_yuv_prop;
struct drm_property *csc_rgb_prop;
struct drm_property *colorkey_prop;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 8ab3cd1a8cdb..e2d5792b140f 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -6,7 +6,9 @@
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/module.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
@@ -52,6 +54,11 @@ static const struct armada_drm_slave_config tda19988_config = {
};
#endif
+static bool is_componentized(struct device *dev)
+{
+ return dev->of_node || dev->platform_data;
+}
+
static void armada_drm_unref_work(struct work_struct *work)
{
struct armada_private *priv =
@@ -85,6 +92,7 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
const struct platform_device_id *id;
+ const struct armada_variant *variant;
struct armada_private *priv;
struct resource *res[ARRAY_SIZE(priv->dcrtc)];
struct resource *mem = NULL;
@@ -107,7 +115,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
return -EINVAL;
}
- if (!res[0] || !mem)
+ if (!mem)
return -ENXIO;
if (!devm_request_mem_region(dev->dev, mem->start,
@@ -128,11 +136,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
if (!id)
return -ENXIO;
- priv->variant = (struct armada_variant *)id->driver_data;
-
- ret = priv->variant->init(priv, dev->dev);
- if (ret)
- return ret;
+ variant = (const struct armada_variant *)id->driver_data;
INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
INIT_KFIFO(priv->fb_unref);
@@ -155,40 +159,50 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
/* Create all LCD controllers */
for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+ int irq;
+
if (!res[n])
break;
- ret = armada_drm_crtc_create(dev, n, res[n]);
+ irq = platform_get_irq(dev->platformdev, n);
+ if (irq < 0)
+ goto err_kms;
+
+ ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
+ variant, NULL);
if (ret)
goto err_kms;
}
+ if (is_componentized(dev->dev)) {
+ ret = component_bind_all(dev->dev, dev);
+ if (ret)
+ goto err_kms;
+ } else {
#ifdef CONFIG_DRM_ARMADA_TDA1998X
- ret = armada_drm_connector_slave_create(dev, &tda19988_config);
- if (ret)
- goto err_kms;
+ ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+ if (ret)
+ goto err_kms;
#endif
+ }
- ret = drm_vblank_init(dev, n);
- if (ret)
- goto err_kms;
-
- ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret)
- goto err_kms;
+ goto err_comp;
dev->vblank_disable_allowed = 1;
ret = armada_fbdev_init(dev);
if (ret)
- goto err_irq;
+ goto err_comp;
drm_kms_helper_poll_init(dev);
return 0;
- err_irq:
- drm_irq_uninstall(dev);
+ err_comp:
+ if (is_componentized(dev->dev))
+ component_unbind_all(dev->dev, dev);
err_kms:
drm_mode_config_cleanup(dev);
drm_mm_takedown(&priv->linear);
@@ -203,7 +217,10 @@ static int armada_drm_unload(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
armada_fbdev_fini(dev);
- drm_irq_uninstall(dev);
+
+ if (is_componentized(dev->dev))
+ component_unbind_all(dev->dev, dev);
+
drm_mode_config_cleanup(dev);
drm_mm_takedown(&priv->linear);
flush_work(&priv->fb_unref_work);
@@ -259,52 +276,6 @@ static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
}
-static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = arg;
- struct armada_private *priv = dev->dev_private;
- struct armada_crtc *dcrtc = priv->dcrtc[0];
- uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
- irqreturn_t handled = IRQ_NONE;
-
- /*
- * This is rediculous - rather than writing bits to clear, we
- * have to set the actual status register value. This is racy.
- */
- writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
-
- /* Mask out those interrupts we haven't enabled */
- v = stat & dcrtc->irq_ena;
-
- if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
- armada_drm_crtc_irq(dcrtc, stat);
- handled = IRQ_HANDLED;
- }
-
- return handled;
-}
-
-static int armada_drm_irq_postinstall(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
- struct armada_crtc *dcrtc = priv->dcrtc[0];
-
- spin_lock_irq(&dev->vbl_lock);
- writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
- writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
- spin_unlock_irq(&dev->vbl_lock);
-
- return 0;
-}
-
-static void armada_drm_irq_uninstall(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
- struct armada_crtc *dcrtc = priv->dcrtc[0];
-
- writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
-}
-
static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
DRM_UNLOCKED),
@@ -340,9 +311,6 @@ static struct drm_driver armada_drm_driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
- .irq_handler = armada_drm_irq_handler,
- .irq_postinstall = armada_drm_irq_postinstall,
- .irq_uninstall = armada_drm_irq_uninstall,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = armada_drm_debugfs_init,
.debugfs_cleanup = armada_drm_debugfs_cleanup,
@@ -362,19 +330,140 @@ static struct drm_driver armada_drm_driver = {
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_HAVE_IRQ | DRIVER_PRIME,
+ DRIVER_PRIME,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
+static int armada_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&armada_drm_driver, to_platform_device(dev));
+}
+
+static void armada_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int compare_dev_name(struct device *dev, void *data)
+{
+ const char *name = data;
+ return !strcmp(dev_name(dev), name);
+}
+
+static void armada_add_endpoints(struct device *dev,
+ struct component_match **match, struct device_node *port)
+{
+ struct device_node *ep, *remote;
+
+ for_each_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ } else if (!of_device_is_available(remote->parent)) {
+ dev_warn(dev, "parent device of %s is not available\n",
+ remote->full_name);
+ of_node_put(remote);
+ continue;
+ }
+
+ component_match_add(dev, match, compare_of, remote);
+ of_node_put(remote);
+ }
+}
+
+static int armada_drm_find_components(struct device *dev,
+ struct component_match **match)
+{
+ struct device_node *port;
+ int i;
+
+ if (dev->of_node) {
+ struct device_node *np = dev->of_node;
+
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ component_match_add(dev, match, compare_of, port);
+ of_node_put(port);
+ }
+
+ if (i == 0) {
+ dev_err(dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ armada_add_endpoints(dev, match, port);
+ of_node_put(port);
+ }
+ } else if (dev->platform_data) {
+ char **devices = dev->platform_data;
+ struct device *d;
+
+ for (i = 0; devices[i]; i++)
+ component_match_add(dev, match, compare_dev_name,
+ devices[i]);
+
+ if (i == 0) {
+ dev_err(dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; devices[i]; i++) {
+ d = bus_find_device_by_name(&platform_bus_type, NULL,
+ devices[i]);
+ if (d && d->of_node) {
+ for_each_child_of_node(d->of_node, port)
+ armada_add_endpoints(dev, match, port);
+ }
+ put_device(d);
+ }
+ }
+
+ return 0;
+}
+
+static const struct component_master_ops armada_master_ops = {
+ .bind = armada_drm_bind,
+ .unbind = armada_drm_unbind,
+};
+
static int armada_drm_probe(struct platform_device *pdev)
{
- return drm_platform_init(&armada_drm_driver, pdev);
+ if (is_componentized(&pdev->dev)) {
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = armada_drm_find_components(&pdev->dev, &match);
+ if (ret < 0)
+ return ret;
+
+ return component_master_add_with_match(&pdev->dev,
+ &armada_master_ops, match);
+ } else {
+ return drm_platform_init(&armada_drm_driver, pdev);
+ }
}
static int armada_drm_remove(struct platform_device *pdev)
{
- drm_put_dev(platform_get_drvdata(pdev));
+ if (is_componentized(&pdev->dev))
+ component_master_del(&pdev->dev, &armada_master_ops);
+ else
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
@@ -402,14 +491,24 @@ static struct platform_driver armada_drm_platform_driver = {
static int __init armada_drm_init(void)
{
+ int ret;
+
armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
- return platform_driver_register(&armada_drm_platform_driver);
+
+ ret = platform_driver_register(&armada_lcd_platform_driver);
+ if (ret)
+ return ret;
+ ret = platform_driver_register(&armada_drm_platform_driver);
+ if (ret)
+ platform_driver_unregister(&armada_lcd_platform_driver);
+ return ret;
}
module_init(armada_drm_init);
static void __exit armada_drm_exit(void)
{
platform_driver_unregister(&armada_drm_platform_driver);
+ platform_driver_unregister(&armada_lcd_platform_driver);
}
module_exit(armada_drm_exit);
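
The master side mirrors this: the probe path builds a component_match list and hands it to the component core, whose bind callback then creates the top-level device and binds every child via component_bind_all(). A condensed, hypothetical sketch (example_* names invented; the compare callback plays the same role as compare_of()/compare_dev_name() above):

    #include <linux/component.h>
    #include <linux/platform_device.h>
    #include <linux/string.h>

    static int example_master_bind(struct device *dev)
    {
            /* Create the top-level (e.g. DRM) device here, then bind children. */
            return component_bind_all(dev, NULL /* opaque data for children */);
    }

    static void example_master_unbind(struct device *dev)
    {
            component_unbind_all(dev, NULL);
    }

    static const struct component_master_ops example_master_ops = {
            .bind   = example_master_bind,
            .unbind = example_master_unbind,
    };

    static int example_compare(struct device *dev, void *data)
    {
            return !strcmp(dev_name(dev), data);    /* match a child by name */
    }

    static int example_master_probe(struct platform_device *pdev)
    {
            struct component_match *match = NULL;

            /* One entry per expected child; the device name is a placeholder. */
            component_match_add(&pdev->dev, &match, example_compare,
                                (void *)"example-lcd.0");

            return component_master_add_with_match(&pdev->dev,
                                                   &example_master_ops, match);
    }
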
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index fd166f532ab9..7838e731b0de 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -131,7 +131,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
return ret;
}
-static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
.gamma_set = armada_drm_crtc_gamma_set,
.gamma_get = armada_drm_crtc_gamma_get,
.fb_probe = armada_fb_probe,
@@ -149,7 +149,7 @@ int armada_fbdev_init(struct drm_device *dev)
priv->fbdev = fbh;
- fbh->funcs = &armada_fb_helper_funcs;
+ drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
ret = drm_fb_helper_init(dev, fbh, 1, 1);
if (ret) {
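
The same conversion is applied to the ast, bochs and cirrus fbdev code below; it is mechanical, and in sketch form (dev, fbh, num_crtc, max_conn and my_fb_helper_funcs are stand-ins for a driver's own objects) looks like:

    /* before: the funcs pointer was assigned by hand */
    fbh->funcs = &my_fb_helper_funcs;
    ret = drm_fb_helper_init(dev, fbh, num_crtc, max_conn);

    /* after: drm_fb_helper_prepare() initialises the helper first,
     * which also allows the funcs table to become const
     */
    drm_fb_helper_prepare(dev, fbh, &my_fb_helper_funcs);
    ret = drm_fb_helper_init(dev, fbh, num_crtc, max_conn);
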
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
index d685a5421485..abbc309fe539 100644
--- a/drivers/gpu/drm/armada/armada_output.c
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -48,7 +48,7 @@ static void armada_drm_connector_destroy(struct drm_connector *conn)
{
struct armada_connector *dconn = drm_to_armada_conn(conn);
- drm_sysfs_connector_remove(conn);
+ drm_connector_unregister(conn);
drm_connector_cleanup(conn);
kfree(dconn);
}
@@ -141,7 +141,7 @@ int armada_output_create(struct drm_device *dev,
if (ret)
goto err_conn;
- ret = drm_sysfs_connector_add(&dconn->conn);
+ ret = drm_connector_register(&dconn->conn);
if (ret)
goto err_sysfs;
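
The drm_sysfs_connector_add()/_remove() calls replaced here map one-to-one onto the connector register API introduced further down in the drm_crtc.c hunk (which also wires up the debugfs entry). A hedged sketch of the resulting connector life cycle, with my_connector_funcs standing in for a driver's table:

    ret = drm_connector_init(dev, connector, &my_connector_funcs,
                             DRM_MODE_CONNECTOR_VIRTUAL);
    if (ret)
            return ret;

    /* was: drm_sysfs_connector_add(connector) */
    ret = drm_connector_register(connector);
    if (ret) {
            drm_connector_cleanup(connector);
            return ret;
    }

    /* ... and teardown mirrors the setup ... */
    drm_connector_unregister(connector);    /* was: drm_sysfs_connector_remove() */
    drm_connector_cleanup(connector);
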
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5d6a87573c33..957d4fabf1e1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -362,7 +362,7 @@ static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index a28640f47c27..cba45c774552 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -287,7 +287,7 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = ast_crtc->lut_b[regno] << 8;
}
-static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
.gamma_set = ast_fb_gamma_set,
.gamma_get = ast_fb_gamma_get,
.fb_probe = astfb_create,
@@ -328,8 +328,10 @@ int ast_fbdev_init(struct drm_device *dev)
return -ENOMEM;
ast->fbdev = afbdev;
- afbdev->helper.funcs = &ast_fb_helper_funcs;
spin_lock_init(&afbdev->dirty_lock);
+
+ drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
+
ret = drm_fb_helper_init(dev, &afbdev->helper,
1, 1);
if (ret) {
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 114aee941d46..5389350244f2 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -667,17 +667,9 @@ static void ast_encoder_destroy(struct drm_encoder *encoder)
static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
@@ -829,7 +821,7 @@ static void ast_connector_destroy(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
ast_i2c_destroy(ast_connector->i2c);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -871,7 +863,7 @@ static int ast_connector_init(struct drm_device *dev)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
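
The best_encoder simplification above (repeated below for bochs and cirrus) is purely mechanical: the generic object lookup plus container_of collapses into a single typed helper.

    /* before */
    obj = drm_mode_object_find(connector->dev, enc_id,
                               DRM_MODE_OBJECT_ENCODER);
    encoder = obj ? obj_to_encoder(obj) : NULL;

    /* after */
    encoder = drm_encoder_find(connector->dev, enc_id);
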
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 9c13df29fd20..f5e0ead974a6 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -97,6 +97,7 @@ static struct drm_driver bochs_driver = {
/* ---------------------------------------------------------------------- */
/* pm interface */
+#ifdef CONFIG_PM_SLEEP
static int bochs_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -131,6 +132,7 @@ static int bochs_pm_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
return 0;
}
+#endif
static const struct dev_pm_ops bochs_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend,
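
The new #ifdef CONFIG_PM_SLEEP guards in the bochs and cirrus drivers silence "defined but not used" warnings: SET_SYSTEM_SLEEP_PM_OPS() expands to nothing when CONFIG_PM_SLEEP is disabled, so unguarded callbacks would otherwise be referenced by nobody. A minimal sketch of the pattern (example_* names are illustrative only):

    #include <linux/device.h>
    #include <linux/pm.h>

    #ifdef CONFIG_PM_SLEEP
    static int example_pm_suspend(struct device *dev)
    {
            return 0;       /* quiesce the hardware here */
    }

    static int example_pm_resume(struct device *dev)
    {
            return 0;       /* restore the hardware here */
    }
    #endif

    static const struct dev_pm_ops example_pm_ops = {
            /* expands to empty initialisers when CONFIG_PM_SLEEP is not set */
            SET_SYSTEM_SLEEP_PM_OPS(example_pm_suspend, example_pm_resume)
    };
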
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 561b84474122..fe95d31cd110 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -72,7 +72,7 @@ static int bochsfb_create(struct drm_fb_helper *helper,
bo = gem_to_bochs_bo(gobj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
if (ret)
return ret;
@@ -179,7 +179,7 @@ void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = regno;
}
-static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
.gamma_set = bochs_fb_gamma_set,
.gamma_get = bochs_fb_gamma_get,
.fb_probe = bochsfb_create,
@@ -189,7 +189,8 @@ int bochs_fbdev_init(struct bochs_device *bochs)
{
int ret;
- bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+ drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
+ &bochs_fb_helper_funcs);
ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
1, 1);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index dcf2e55f4ae9..9d7346b92653 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -53,7 +53,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
bochs_fb = to_bochs_framebuffer(old_fb);
bo = gem_to_bochs_bo(bochs_fb->obj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
if (ret) {
DRM_ERROR("failed to reserve old_fb bo\n");
} else {
@@ -67,7 +67,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
bo = gem_to_bochs_bo(bochs_fb->obj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
if (ret)
return ret;
@@ -216,18 +216,9 @@ static struct drm_encoder *
bochs_connector_best_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index b9a695d92792..1728a1b0b813 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -387,7 +387,7 @@ int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
*obj = NULL;
- size = ALIGN(size, PAGE_SIZE);
+ size = PAGE_ALIGN(size);
if (size == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 98fd17ae4916..d466696ed5e8 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -328,7 +328,7 @@ int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
}
drm_connector_helper_add(&ptn_bridge->connector,
&ptn3460_connector_helper_funcs);
- drm_sysfs_connector_add(&ptn_bridge->connector);
+ drm_connector_register(&ptn_bridge->connector);
drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);
return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 08ce520f61a5..4516b052cc67 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -76,6 +76,7 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
+#ifdef CONFIG_PM_SLEEP
static int cirrus_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -110,6 +111,7 @@ static int cirrus_pm_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
return 0;
}
+#endif
static const struct file_operations cirrus_driver_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 117d3eca5e37..401c890b6c6a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -241,7 +241,7 @@ static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 32bbba0a787b..2a135f253e29 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -288,7 +288,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
return 0;
}
-static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
.gamma_set = cirrus_crtc_fb_gamma_set,
.gamma_get = cirrus_crtc_fb_gamma_get,
.fb_probe = cirrusfb_create,
@@ -306,9 +306,11 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
return -ENOMEM;
cdev->mode_info.gfbdev = gfbdev;
- gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
spin_lock_init(&gfbdev->dirty_lock);
+ drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
+ &cirrus_fb_helper_funcs);
+
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
if (ret) {
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 49332c5fe35b..e1c5c3222129 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -509,19 +509,9 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj =
- drm_mode_object_find(connector->dev, enc_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 0406110f83ed..86a4a4a60afc 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -80,11 +80,7 @@ int drm_buffer_alloc(struct drm_buffer **buf, int size)
error_out:
- /* Only last element can be null pointer so check for it first. */
- if ((*buf)->data[idx])
- kfree((*buf)->data[idx]);
-
- for (--idx; idx >= 0; --idx)
+ for (; idx >= 0; --idx)
kfree((*buf)->data[idx]);
kfree(*buf);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 68175b54504b..61acb8f6756d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1217,7 +1217,6 @@ int drm_infobufs(struct drm_device *dev, void *data,
struct drm_buf_desc __user *to =
&request->list[count];
struct drm_buf_entry *from = &dma->bufs[i];
- struct drm_freelist *list = &dma->bufs[i].freelist;
if (copy_to_user(&to->count,
&from->buf_count,
sizeof(from->buf_count)) ||
@@ -1225,19 +1224,19 @@ int drm_infobufs(struct drm_device *dev, void *data,
&from->buf_size,
sizeof(from->buf_size)) ||
copy_to_user(&to->low_mark,
- &list->low_mark,
- sizeof(list->low_mark)) ||
+ &from->low_mark,
+ sizeof(from->low_mark)) ||
copy_to_user(&to->high_mark,
- &list->high_mark,
- sizeof(list->high_mark)))
+ &from->high_mark,
+ sizeof(from->high_mark)))
return -EFAULT;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
- dma->bufs[i].freelist.low_mark,
- dma->bufs[i].freelist.high_mark);
+ dma->bufs[i].low_mark,
+ dma->bufs[i].high_mark);
++count;
}
}
@@ -1290,8 +1289,8 @@ int drm_markbufs(struct drm_device *dev, void *data,
if (request->high_mark < 0 || request->high_mark > entry->buf_count)
return -EINVAL;
- entry->freelist.low_mark = request->low_mark;
- entry->freelist.high_mark = request->high_mark;
+ entry->low_mark = request->low_mark;
+ entry->high_mark = request->high_mark;
return 0;
}
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index a4b017b6849e..9b23525c0ed0 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -1,18 +1,13 @@
-/**
- * \file drm_context.c
- * IOCTLs for generic contexts
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ * Legacy: Generic DRM Contexts
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -33,14 +28,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/*
- * ChangeLog:
- * 2001-11-16 Torsten Duwe <duwe@caldera.de>
- * added context constructor/destructor hooks,
- * needed by SiS driver's memory management.
- */
-
#include <drm/drmP.h>
+#include "drm_legacy.h"
+
+struct drm_ctx_list {
+ struct list_head head;
+ drm_context_t handle;
+ struct drm_file *tag;
+};
/******************************************************************/
/** \name Context bitmap support */
@@ -56,7 +51,7 @@
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
@@ -72,7 +67,7 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
* Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
-static int drm_ctxbitmap_next(struct drm_device * dev)
+static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
{
int ret;
@@ -90,7 +85,7 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
*
* Initialise the drm_device::ctx_idr
*/
-int drm_ctxbitmap_init(struct drm_device * dev)
+int drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
idr_init(&dev->ctx_idr);
return 0;
@@ -104,13 +99,43 @@ int drm_ctxbitmap_init(struct drm_device * dev)
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
+/**
+ * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
+ * @dev: DRM device to operate on
+ * @file: Open file to flush contexts for
+ *
+ * This iterates over all contexts on @dev and drops them if they're owned by
+ * @file. Note that after this call returns, new contexts might be added if
+ * the file is still alive.
+ */
+void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_ctx_list *pos, *tmp;
+
+ mutex_lock(&dev->ctxlist_mutex);
+
+ list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
+ if (pos->tag == file &&
+ pos->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev, pos->handle);
+
+ drm_legacy_ctxbitmap_free(dev, pos->handle);
+ list_del(&pos->head);
+ kfree(pos);
+ }
+ }
+
+ mutex_unlock(&dev->ctxlist_mutex);
+}
+
/*@}*/
/******************************************************************/
@@ -129,8 +154,8 @@ void drm_ctxbitmap_cleanup(struct drm_device * dev)
* Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
-int drm_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_getsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map;
@@ -173,8 +198,8 @@ int drm_getsareactx(struct drm_device *dev, void *data,
* Searches the mapping specified in \p arg and update the entry in
* drm_device::ctx_idr with it.
*/
-int drm_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_setsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map = NULL;
@@ -273,8 +298,8 @@ static int drm_context_switch_complete(struct drm_device *dev,
* \param arg user argument pointing to a drm_ctx_res structure.
* \return zero on success or a negative number on failure.
*/
-int drm_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_resctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_res *res = data;
struct drm_ctx ctx;
@@ -304,16 +329,16 @@ int drm_resctx(struct drm_device *dev, void *data,
*
* Get a new handle for the context and copy to userspace.
*/
-int drm_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_addctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
- ctx->handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_legacy_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
- ctx->handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle == -1) {
@@ -348,7 +373,8 @@ int drm_addctx(struct drm_device *dev, void *data,
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*/
-int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_legacy_getctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -369,8 +395,8 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Calls context_switch().
*/
-int drm_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_switchctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -389,8 +415,8 @@ int drm_switchctx(struct drm_device *dev, void *data,
*
* Calls context_switch_complete().
*/
-int drm_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_newctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -411,8 +437,8 @@ int drm_newctx(struct drm_device *dev, void *data,
*
* If not the special kernel context, calls ctxbitmap_free() to free the specified context.
*/
-int drm_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_rmctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -420,7 +446,7 @@ int drm_rmctx(struct drm_device *dev, void *data,
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);
- drm_ctxbitmap_free(dev, ctx->handle);
+ drm_legacy_ctxbitmap_free(dev, ctx->handle);
}
mutex_lock(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fe94cc10cd35..fa2be249999c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -41,6 +41,10 @@
#include "drm_crtc_internal.h"
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv);
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
@@ -178,6 +182,12 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
+static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
+ { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
+ { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
+ { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+};
+
/*
* Non-global properties, but "required" for certain connectors.
*/
@@ -357,6 +367,32 @@ const char *drm_get_format_name(uint32_t format)
}
EXPORT_SYMBOL(drm_get_format_name);
+/*
+ * Internal function to assign a slot in the object idr and optionally
+ * register the object into the idr.
+ */
+static int drm_mode_object_get_reg(struct drm_device *dev,
+ struct drm_mode_object *obj,
+ uint32_t obj_type,
+ bool register_obj)
+{
+ int ret;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = ret;
+ obj->type = obj_type;
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+
/**
* drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
@@ -375,21 +411,15 @@ EXPORT_SYMBOL(drm_get_format_name);
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
- int ret;
+ return drm_mode_object_get_reg(dev, obj, obj_type, true);
+}
+static void drm_mode_object_register(struct drm_device *dev,
+ struct drm_mode_object *obj)
+{
mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
- if (ret >= 0) {
- /*
- * Set up the object linking under the protection of the idr
- * lock so that other users can't see inconsistent state.
- */
- obj->id = ret;
- obj->type = obj_type;
- }
+ idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
mutex_unlock(&dev->mode_config.idr_mutex);
-
- return ret < 0 ? ret : 0;
}
/**
@@ -416,8 +446,12 @@ static struct drm_mode_object *_object_find(struct drm_device *dev,
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) ||
- (obj->id != id))
+ if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
+ obj = NULL;
+ if (obj && obj->id != id)
+ obj = NULL;
+ /* don't leak out unref'd fb's */
+ if (obj && (obj->type == DRM_MODE_OBJECT_FB))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
@@ -444,9 +478,6 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
* function.*/
WARN_ON(type == DRM_MODE_OBJECT_FB);
obj = _object_find(dev, id, type);
- /* don't leak out unref'd fb's */
- if (obj && (obj->type == DRM_MODE_OBJECT_FB))
- obj = NULL;
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
@@ -723,7 +754,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
*/
int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary,
- void *cursor,
+ struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs)
{
struct drm_mode_config *config = &dev->mode_config;
@@ -748,8 +779,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
config->num_crtc++;
crtc->primary = primary;
+ crtc->cursor = cursor;
if (primary)
primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+ if (cursor)
+ cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
out:
drm_modeset_unlock_all(dev);
@@ -842,7 +876,7 @@ int drm_connector_init(struct drm_device *dev,
drm_modeset_lock_all(dev);
- ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
if (ret)
goto out_unlock;
@@ -881,6 +915,8 @@ int drm_connector_init(struct drm_device *dev,
drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
+ connector->debugfs_entry = NULL;
+
out_put:
if (ret)
drm_mode_object_put(dev, &connector->base);
@@ -921,6 +957,49 @@ void drm_connector_cleanup(struct drm_connector *connector)
EXPORT_SYMBOL(drm_connector_cleanup);
/**
+ * drm_connector_register - register a connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_register(struct drm_connector *connector)
+{
+ int ret;
+
+ drm_mode_object_register(connector->dev, &connector->base);
+
+ ret = drm_sysfs_connector_add(connector);
+ if (ret)
+ return ret;
+
+ ret = drm_debugfs_connector_add(connector);
+ if (ret) {
+ drm_sysfs_connector_remove(connector);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_register);
+
+/**
+ * drm_connector_unregister - unregister a connector
+ * @connector: the connector to unregister
+ *
+ * Unregister userspace interfaces for a connector
+ */
+void drm_connector_unregister(struct drm_connector *connector)
+{
+ drm_sysfs_connector_remove(connector);
+ drm_debugfs_connector_remove(connector);
+}
+EXPORT_SYMBOL(drm_connector_unregister);
+
+
+/**
* drm_connector_unplug_all - unregister connector userspace interfaces
* @dev: drm device
*
@@ -934,7 +1013,7 @@ void drm_connector_unplug_all(struct drm_device *dev)
/* taking the mode config mutex ends up in a clash with sysfs */
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
}
EXPORT_SYMBOL(drm_connector_unplug_all);
@@ -1214,6 +1293,7 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
{
struct drm_property *edid;
struct drm_property *dpms;
+ struct drm_property *dev_path;
/*
* Standard properties (apply to all connectors)
@@ -1228,6 +1308,12 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
ARRAY_SIZE(drm_dpms_enum_list));
dev->mode_config.dpms_property = dpms;
+ dev_path = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "PATH", 0);
+ dev->mode_config.path_property = dev_path;
+
return 0;
}
@@ -1384,6 +1470,33 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
+ * drm_mode_create_aspect_ratio_property - create aspect ratio property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
+{
+ if (dev->mode_config.aspect_ratio_property)
+ return 0;
+
+ dev->mode_config.aspect_ratio_property =
+ drm_property_create_enum(dev, 0, "aspect ratio",
+ drm_aspect_ratio_enum_list,
+ ARRAY_SIZE(drm_aspect_ratio_enum_list));
+
+ if (dev->mode_config.aspect_ratio_property == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
+
+/**
* drm_mode_create_dirty_property - create dirty property
* @dev: DRM device
*
@@ -1470,6 +1583,15 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
+void drm_reinit_primary_mode_group(struct drm_device *dev)
+{
+ drm_modeset_lock_all(dev);
+ drm_mode_group_destroy(&dev->primary->mode_group);
+ drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+ drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_reinit_primary_mode_group);
+
/**
* drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
@@ -2118,45 +2240,32 @@ out:
return ret;
}
-/**
- * drm_mode_setplane - configure a plane's configuration
- * @dev: DRM device
- * @data: ioctl data*
- * @file_priv: DRM file info
+/*
+ * setplane_internal - setplane handler for internal callers
*
- * Set plane configuration, including placement, fb, scaling, and other factors.
- * Or pass a NULL fb to disable.
+ * Note that we assume an extra reference has already been taken on fb. If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
*
- * Returns:
- * Zero on success, errno on failure.
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
*/
-int drm_mode_setplane(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int setplane_internal(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ /* src_{x,y,w,h} values are 16.16 fixed point */
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
{
- struct drm_mode_set_plane *plane_req = data;
- struct drm_plane *plane;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ struct drm_device *dev = plane->dev;
+ struct drm_framebuffer *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- /*
- * First, find the plane, crtc, and fb objects. If not available,
- * we don't bother to call the driver.
- */
- plane = drm_plane_find(dev, plane_req->plane_id);
- if (!plane) {
- DRM_DEBUG_KMS("Unknown plane ID %d\n",
- plane_req->plane_id);
- return -ENOENT;
- }
-
/* No fb means shut it down */
- if (!plane_req->fb_id) {
+ if (!fb) {
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane);
@@ -2170,14 +2279,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
goto out;
}
- crtc = drm_crtc_find(dev, plane_req->crtc_id);
- if (!crtc) {
- DRM_DEBUG_KMS("Unknown crtc ID %d\n",
- plane_req->crtc_id);
- ret = -ENOENT;
- goto out;
- }
-
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
DRM_DEBUG_KMS("Invalid crtc for plane\n");
@@ -2185,14 +2286,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
goto out;
}
- fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
- if (!fb) {
- DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
- plane_req->fb_id);
- ret = -ENOENT;
- goto out;
- }
-
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
if (fb->pixel_format == plane->format_types[i])
@@ -2208,43 +2301,25 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
- if (plane_req->src_w > fb_width ||
- plane_req->src_x > fb_width - plane_req->src_w ||
- plane_req->src_h > fb_height ||
- plane_req->src_y > fb_height - plane_req->src_h) {
+ if (src_w > fb_width ||
+ src_x > fb_width - src_w ||
+ src_h > fb_height ||
+ src_y > fb_height - src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
- plane_req->src_w >> 16,
- ((plane_req->src_w & 0xffff) * 15625) >> 10,
- plane_req->src_h >> 16,
- ((plane_req->src_h & 0xffff) * 15625) >> 10,
- plane_req->src_x >> 16,
- ((plane_req->src_x & 0xffff) * 15625) >> 10,
- plane_req->src_y >> 16,
- ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+ src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+ src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+ src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
ret = -ENOSPC;
goto out;
}
- /* Give drivers some help against integer overflows */
- if (plane_req->crtc_w > INT_MAX ||
- plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
- plane_req->crtc_h > INT_MAX ||
- plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->crtc_x, plane_req->crtc_y);
- ret = -ERANGE;
- goto out;
- }
-
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
- plane_req->crtc_x, plane_req->crtc_y,
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->src_x, plane_req->src_y,
- plane_req->src_w, plane_req->src_h);
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
@@ -2261,6 +2336,85 @@ out:
drm_framebuffer_unreference(old_fb);
return ret;
+
+}
+
+/**
+ * drm_mode_setplane - configure a plane's configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane configuration, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable (planes may be disabled without providing a
+ * valid crtc).
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc = NULL;
+ struct drm_framebuffer *fb = NULL;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ return -ERANGE;
+ }
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ return -ENOENT;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane_req->fb_id) {
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ return -ENOENT;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ return -ENOENT;
+ }
+ crtc = obj_to_crtc(obj);
+ }
+
+ /*
+ * setplane_internal will take care of deref'ing either the old or new
+ * framebuffer depending on success.
+ */
+ return setplane_internal(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
}
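
As an aside, a minimal sketch of how userspace might drive this handler through the SETPLANE ioctl, assuming the uapi drm headers are on the include path; the struct fields mirror the plane_req fields consumed above, and the source rectangle is in 16.16 fixed point while the CRTC rectangle is in whole pixels:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

/* Hypothetical helper: scan out a whole framebuffer on a plane at (x, y). */
static int set_plane(int drm_fd, uint32_t plane_id, uint32_t crtc_id,
		     uint32_t fb_id, int32_t x, int32_t y,
		     uint32_t w, uint32_t h)
{
	struct drm_mode_set_plane req = {
		.plane_id = plane_id,
		.crtc_id = crtc_id,
		.fb_id = fb_id,		/* 0 disables the plane */
		.crtc_x = x,
		.crtc_y = y,
		.crtc_w = w,
		.crtc_h = h,
		.src_x = 0,
		.src_y = 0,
		.src_w = w << 16,	/* source coordinates are 16.16 fixed point */
		.src_h = h << 16,
	};

	return ioctl(drm_fd, DRM_IOCTL_MODE_SETPLANE, &req);
}
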
/**
@@ -2509,6 +2663,102 @@ out:
return ret;
}
+/**
+ * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
+ * universal plane handler call
+ * @crtc: crtc to update cursor for
+ * @req: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Legacy cursor ioctls work directly with driver buffer handles. To
+ * translate legacy ioctl calls into universal plane handler calls, we need to
+ * wrap the native buffer handle in a drm_framebuffer.
+ *
+ * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
+ * buffer with a pitch of 4*width; the universal plane interface should be used
+ * directly in cases where the hardware can support other buffer settings and
+ * userspace wants to make use of these capabilities.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+ struct drm_mode_cursor2 *req,
+ struct drm_file *file_priv)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd2 fbreq = {
+ .width = req->width,
+ .height = req->height,
+ .pixel_format = DRM_FORMAT_ARGB8888,
+ .pitches = { req->width * 4 },
+ .handles = { req->handle },
+ };
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w = 0, crtc_h = 0;
+ uint32_t src_w = 0, src_h = 0;
+ int ret = 0;
+
+ BUG_ON(!crtc->cursor);
+
+ /*
+ * Obtain fb we'll be using (either new or existing) and take an extra
+ * reference to it if fb != null. setplane will take care of dropping
+ * the reference if the plane update fails.
+ */
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (req->handle) {
+ fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+ return PTR_ERR(fb);
+ }
+
+ drm_framebuffer_reference(fb);
+ } else {
+ fb = NULL;
+ }
+ } else {
+ mutex_lock(&dev->mode_config.mutex);
+ fb = crtc->cursor->fb;
+ if (fb)
+ drm_framebuffer_reference(fb);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ crtc_x = req->x;
+ crtc_y = req->y;
+ } else {
+ crtc_x = crtc->cursor_x;
+ crtc_y = crtc->cursor_y;
+ }
+
+ if (fb) {
+ crtc_w = fb->width;
+ crtc_h = fb->height;
+ src_w = fb->width << 16;
+ src_h = fb->height << 16;
+ }
+
+ /*
+ * setplane_internal will take care of deref'ing either the old or new
+ * framebuffer depending on success.
+ */
+ ret = setplane_internal(crtc->cursor, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h);
+
+ /* Update successful; save new cursor position, if necessary */
+ if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+ crtc->cursor_x = req->x;
+ crtc->cursor_y = req->y;
+ }
+
+ return ret;
+}
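
For context, a rough sketch of the driver-side wiring that makes this path reachable: registering a cursor plane and passing it to drm_crtc_init_with_planes() populates crtc->cursor, so the legacy cursor ioctls are routed through drm_mode_cursor_universal(). The my_* names and the single-format list are placeholders, not part of this patch.

extern const struct drm_plane_funcs my_cursor_plane_funcs;	/* placeholder */
extern const struct drm_crtc_funcs my_crtc_funcs;		/* placeholder */

static const uint32_t cursor_formats[] = { DRM_FORMAT_ARGB8888 };

static int my_crtc_create(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_plane *primary, struct drm_plane *cursor)
{
	int ret;

	ret = drm_universal_plane_init(dev, cursor, 1 /* possible_crtcs */,
				       &my_cursor_plane_funcs, cursor_formats,
				       ARRAY_SIZE(cursor_formats),
				       DRM_PLANE_TYPE_CURSOR);
	if (ret)
		return ret;

	/* passing the cursor plane here sets crtc->cursor */
	return drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					 &my_crtc_funcs);
}
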
+
static int drm_mode_cursor_common(struct drm_device *dev,
struct drm_mode_cursor2 *req,
struct drm_file *file_priv)
@@ -2528,6 +2778,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
return -ENOENT;
}
+ /*
+ * If this crtc has a universal cursor plane, call that plane's update
+ * handler rather than using legacy cursor handlers.
+ */
+ if (crtc->cursor)
+ return drm_mode_cursor_universal(crtc, req, file_priv);
+
drm_modeset_lock(&crtc->mutex, NULL);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
@@ -2827,56 +3084,38 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
return 0;
}
-/**
- * drm_mode_addfb2 - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the specified CRTC, given a user request with format. This is
- * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
- * and uses fourcc codes as pixel format specifiers.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, errno on failure.
- */
-int drm_mode_addfb2(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
if (r->flags & ~DRM_MODE_FB_INTERLACED) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
ret = framebuffer_check(r);
if (ret)
- return ret;
+ return ERR_PTR(ret);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- return PTR_ERR(fb);
+ return fb;
}
mutex_lock(&file_priv->fbs_lock);
@@ -2885,8 +3124,37 @@ int drm_mode_addfb2(struct drm_device *dev,
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
mutex_unlock(&file_priv->fbs_lock);
+ return fb;
+}
- return ret;
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_framebuffer *fb;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = add_framebuffer_internal(dev, data, file_priv);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ return 0;
}
/**
@@ -3176,7 +3444,7 @@ fail:
EXPORT_SYMBOL(drm_property_create);
/**
- * drm_property_create - create a new enumeration property type
+ * drm_property_create_enum - create a new enumeration property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3222,7 +3490,7 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
EXPORT_SYMBOL(drm_property_create_enum);
/**
- * drm_property_create - create a new bitmask property type
+ * drm_property_create_bitmask - create a new bitmask property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3242,19 +3510,28 @@ EXPORT_SYMBOL(drm_property_create_enum);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
- int num_values)
+ int num_props,
+ uint64_t supported_bits)
{
struct drm_property *property;
- int i, ret;
+ int i, ret, index = 0;
+ int num_values = hweight64(supported_bits);
flags |= DRM_MODE_PROP_BITMASK;
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
+ for (i = 0; i < num_props; i++) {
+ if (!(supported_bits & (1ULL << props[i].type)))
+ continue;
- for (i = 0; i < num_values; i++) {
- ret = drm_property_add_enum(property, i,
+ if (WARN_ON(index >= num_values)) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+
+ ret = drm_property_add_enum(property, index++,
props[i].type,
props[i].name);
if (ret) {
@@ -3284,7 +3561,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
}
/**
- * drm_property_create - create a new ranged property type
+ * drm_property_create_range - create a new ranged property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3703,6 +3980,25 @@ done:
return ret;
}
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+ char *path)
+{
+ struct drm_device *dev = connector->dev;
+ int ret, size;
+ size = strlen(path) + 1;
+
+ connector->path_blob_ptr = drm_property_create_blob(connector->dev,
+ size, path);
+ if (!connector->path_blob_ptr)
+ return -EINVAL;
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.path_property,
+ connector->path_blob_ptr->base.id);
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_path_property);
+
/**
* drm_mode_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
@@ -3720,6 +4016,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
int ret, size;
+ /* ignore requests to set edid when overridden */
+ if (connector->override_edid)
+ return 0;
+
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -4680,6 +4980,36 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
/**
+ * drm_rotation_simplify() - Try to simplify the rotation
+ * @rotation: Rotation to be simplified
+ * @supported_rotations: Supported rotations
+ *
+ * Attempt to simplify the rotation to a form that is supported.
+ * E.g. if the hardware supports everything except DRM_REFLECT_X
+ * one could call this function like this:
+ *
+ * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
+ * BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
+ * BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
+ *
+ * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
+ * transforms the hardware supports, this function may not
+ * be able to produce a supported transform, so the caller should
+ * check the result afterwards.
+ */
+unsigned int drm_rotation_simplify(unsigned int rotation,
+ unsigned int supported_rotations)
+{
+ if (rotation & ~supported_rotations) {
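+		/*
+		 * Reflecting about both the X and Y axis is the same as a
+		 * 180 degree rotation, so toggling both reflections and
+		 * adding 180 degrees to the rotation part below leaves the
+		 * overall transform unchanged.
+		 */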
+ rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
+ rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
+ }
+
+ return rotation;
+}
+EXPORT_SYMBOL(drm_rotation_simplify);
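
A small sketch of the usage pattern described in the comment above, with a hypothetical supported-rotation mask; the point is re-checking the result after simplification:

static int check_rotation(unsigned int rotation)
{
	/* hypothetical hw: all rotations, but only Y reflection */
	const unsigned int supported = BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
				       BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
				       BIT(DRM_REFLECT_Y);

	rotation = drm_rotation_simplify(rotation, supported);
	if (rotation & ~supported)
		return -EINVAL;	/* could not be reduced to a supported form */

	return 0;
}
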
+
+/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
@@ -4797,3 +5127,21 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+ unsigned int supported_rotations)
+{
+ static const struct drm_prop_enum_list props[] = {
+ { DRM_ROTATE_0, "rotate-0" },
+ { DRM_ROTATE_90, "rotate-90" },
+ { DRM_ROTATE_180, "rotate-180" },
+ { DRM_ROTATE_270, "rotate-270" },
+ { DRM_REFLECT_X, "reflect-x" },
+ { DRM_REFLECT_Y, "reflect-y" },
+ };
+
+ return drm_property_create_bitmask(dev, 0, "rotation",
+ props, ARRAY_SIZE(props),
+ supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
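
A minimal sketch of how a driver might consume this helper, attaching the property to a plane; the function name and initial value are illustrative only:

static void my_attach_rotation(struct drm_device *dev, struct drm_plane *plane)
{
	struct drm_property *prop;

	prop = drm_mode_create_rotation_property(dev,
						 BIT(DRM_ROTATE_0) |
						 BIT(DRM_ROTATE_180));
	if (prop)
		drm_object_attach_property(&plane->base, prop,
					   BIT(DRM_ROTATE_0));
}
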
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 78b37f3febd3..6c65a0a28fbd 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -818,6 +818,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
fb->pixel_format = mode_cmd->pixel_format;
+ fb->flags = mode_cmd->flags;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index b4b51d46f339..13bd42923dd4 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include <drm/drm_edid.h>
#if defined(CONFIG_DEBUG_FS)
@@ -237,5 +238,186 @@ int drm_debugfs_cleanup(struct drm_minor *minor)
return 0;
}
+static int connector_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ const char *status;
+
+ switch (connector->force) {
+ case DRM_FORCE_ON:
+ status = "on\n";
+ break;
+
+ case DRM_FORCE_ON_DIGITAL:
+ status = "digital\n";
+ break;
+
+ case DRM_FORCE_OFF:
+ status = "off\n";
+ break;
+
+ case DRM_FORCE_UNSPECIFIED:
+ status = "unspecified\n";
+ break;
+
+ default:
+ return 0;
+ }
+
+ seq_puts(m, status);
+
+ return 0;
+}
+
+static int connector_open(struct inode *inode, struct file *file)
+{
+ struct drm_connector *dev = inode->i_private;
+
+ return single_open(file, connector_show, dev);
+}
+
+static ssize_t connector_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_connector *connector = m->private;
+ char buf[12];
+
+ if (len > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (!strcmp(buf, "on"))
+ connector->force = DRM_FORCE_ON;
+ else if (!strcmp(buf, "digital"))
+ connector->force = DRM_FORCE_ON_DIGITAL;
+ else if (!strcmp(buf, "off"))
+ connector->force = DRM_FORCE_OFF;
+ else if (!strcmp(buf, "unspecified"))
+ connector->force = DRM_FORCE_UNSPECIFIED;
+ else
+ return -EINVAL;
+
+ return len;
+}
+
+static int edid_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_property_blob *edid = connector->edid_blob_ptr;
+
+ if (connector->override_edid && edid)
+ seq_write(m, edid->data, edid->length);
+
+ return 0;
+}
+
+static int edid_open(struct inode *inode, struct file *file)
+{
+ struct drm_connector *dev = inode->i_private;
+
+ return single_open(file, edid_show, dev);
+}
+
+static ssize_t edid_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_connector *connector = m->private;
+ char *buf;
+ struct edid *edid;
+ int ret;
+
+ buf = memdup_user(ubuf, len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ edid = (struct edid *) buf;
+
+ if (len == 5 && !strncmp(buf, "reset", 5)) {
+ connector->override_edid = false;
+ ret = drm_mode_connector_update_edid_property(connector, NULL);
+ } else if (len < EDID_LENGTH ||
+ EDID_LENGTH * (1 + edid->extensions) > len)
+ ret = -EINVAL;
+ else {
+ connector->override_edid = false;
+ ret = drm_mode_connector_update_edid_property(connector, edid);
+ if (!ret)
+ connector->override_edid = true;
+ }
+
+ kfree(buf);
+
+ return (ret) ? ret : len;
+}
+
+static const struct file_operations drm_edid_fops = {
+ .owner = THIS_MODULE,
+ .open = edid_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = edid_write
+};
+
+
+static const struct file_operations drm_connector_fops = {
+ .owner = THIS_MODULE,
+ .open = connector_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = connector_write
+};
+
+int drm_debugfs_connector_add(struct drm_connector *connector)
+{
+ struct drm_minor *minor = connector->dev->primary;
+ struct dentry *root, *ent;
+
+ if (!minor->debugfs_root)
+ return -1;
+
+ root = debugfs_create_dir(connector->name, minor->debugfs_root);
+ if (!root)
+ return -ENOMEM;
+
+ connector->debugfs_entry = root;
+
+ /* force */
+ ent = debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
+ &drm_connector_fops);
+ if (!ent)
+ goto error;
+
+ /* edid */
+ ent = debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root,
+ connector, &drm_edid_fops);
+ if (!ent)
+ goto error;
+
+ return 0;
+
+error:
+ debugfs_remove_recursive(connector->debugfs_entry);
+ connector->debugfs_entry = NULL;
+ return -ENOMEM;
+}
+
+void drm_debugfs_connector_remove(struct drm_connector *connector)
+{
+ if (!connector->debugfs_entry)
+ return;
+
+ debugfs_remove_recursive(connector->debugfs_entry);
+
+ connector->debugfs_entry = NULL;
+}
+
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
new file mode 100644
index 000000000000..ac3c2738db94
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -0,0 +1,2715 @@
+/*
+ * Copyright © 2014 Red Hat
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drmP.h>
+
+#include <drm/drm_fixed.h>
+
+/**
+ * DOC: dp mst helper
+ *
+ * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
+ * protocol. The helpers contain a topology manager and bandwidth manager,
+ * and encapsulate the sending and receiving of sideband msgs.
+ */
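
To make the call sites below easier to follow, here is a sketch of the driver-supplied callbacks the manager invokes through mgr->cbs. The signatures are inferred from how they are called in this file; see drm_dp_mst_helper.h for the authoritative definitions, and treat the my_* names as placeholders.

static struct drm_connector *
my_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
		     struct drm_dp_mst_port *port, const char *path)
{
	/* create a connector for the new port, expose @path via the
	 * PATH property; NULL here is only a placeholder */
	return NULL;
}

static void my_mst_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_connector *connector)
{
	/* unregister and free the connector created above */
}

static void my_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
	/* kick off reprobing / send a hotplug uevent on the drm_device */
}

static struct drm_dp_mst_topology_cbs my_mst_cbs = {
	.add_connector = my_mst_add_connector,
	.destroy_connector = my_mst_destroy_connector,
	.hotplug = my_mst_hotplug,
};
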
+static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+ char *buf);
+static int test_calc_pbn_mode(void);
+
+static void drm_dp_put_port(struct drm_dp_mst_port *port);
+
+static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+ int id,
+ struct drm_dp_payload *payload);
+
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes);
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port);
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+ u8 *guid);
+
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
+static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+/* sideband msg handling */
+static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
+{
+ u8 bitmask = 0x80;
+ u8 bitshift = 7;
+ u8 array_index = 0;
+ int number_of_bits = num_nibbles * 4;
+ u8 remainder = 0;
+
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ remainder |= (data[array_index] & bitmask) >> bitshift;
+ bitmask >>= 1;
+ bitshift--;
+ if (bitmask == 0) {
+ bitmask = 0x80;
+ bitshift = 7;
+ array_index++;
+ }
+ if ((remainder & 0x10) == 0x10)
+ remainder ^= 0x13;
+ }
+
+ number_of_bits = 4;
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ if ((remainder & 0x10) != 0)
+ remainder ^= 0x13;
+ }
+
+ return remainder;
+}
+
+static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
+{
+ u8 bitmask = 0x80;
+ u8 bitshift = 7;
+ u8 array_index = 0;
+ int number_of_bits = number_of_bytes * 8;
+ u16 remainder = 0;
+
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ remainder |= (data[array_index] & bitmask) >> bitshift;
+ bitmask >>= 1;
+ bitshift--;
+ if (bitmask == 0) {
+ bitmask = 0x80;
+ bitshift = 7;
+ array_index++;
+ }
+ if ((remainder & 0x100) == 0x100)
+ remainder ^= 0xd5;
+ }
+
+ number_of_bits = 8;
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ if ((remainder & 0x100) != 0)
+ remainder ^= 0xd5;
+ }
+
+ return remainder & 0xff;
+}
+static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
+{
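+	/* 3 fixed header bytes plus one RAD byte per two hops (lct / 2) */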
+ u8 size = 3;
+ size += (hdr->lct / 2);
+ return size;
+}
+
+static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+ u8 *buf, int *len)
+{
+ int idx = 0;
+ int i;
+ u8 crc4;
+ buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
+ for (i = 0; i < (hdr->lct / 2); i++)
+ buf[idx++] = hdr->rad[i];
+ buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
+ (hdr->msg_len & 0x3f);
+ buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
+
+ crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
+ buf[idx - 1] |= (crc4 & 0xf);
+
+ *len = idx;
+}
+
+static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+ u8 *buf, int buflen, u8 *hdrlen)
+{
+ u8 crc4;
+ u8 len;
+ int i;
+ u8 idx;
+ if (buf[0] == 0)
+ return false;
+ len = 3;
+ len += ((buf[0] & 0xf0) >> 4) / 2;
+ if (len > buflen)
+ return false;
+ crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
+
+ if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
+ DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
+ return false;
+ }
+
+ hdr->lct = (buf[0] & 0xf0) >> 4;
+ hdr->lcr = (buf[0] & 0xf);
+ idx = 1;
+ for (i = 0; i < (hdr->lct / 2); i++)
+ hdr->rad[i] = buf[idx++];
+ hdr->broadcast = (buf[idx] >> 7) & 0x1;
+ hdr->path_msg = (buf[idx] >> 6) & 0x1;
+ hdr->msg_len = buf[idx] & 0x3f;
+ idx++;
+ hdr->somt = (buf[idx] >> 7) & 0x1;
+ hdr->eomt = (buf[idx] >> 6) & 0x1;
+ hdr->seqno = (buf[idx] >> 4) & 0x1;
+ idx++;
+ *hdrlen = idx;
+ return true;
+}
+
+static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw)
+{
+ int idx = 0;
+ int i;
+ u8 *buf = raw->msg;
+ buf[idx++] = req->req_type & 0x7f;
+
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
+ buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+ idx++;
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
+ (req->u.allocate_payload.number_sdp_streams & 0xf);
+ idx++;
+ buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
+ idx++;
+ buf[idx] = (req->u.allocate_payload.pbn >> 8);
+ idx++;
+ buf[idx] = (req->u.allocate_payload.pbn & 0xff);
+ idx++;
+ for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
+ buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
+ (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
+ idx++;
+ }
+ if (req->u.allocate_payload.number_sdp_streams & 1) {
+ i = req->u.allocate_payload.number_sdp_streams - 1;
+ buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
+ idx++;
+ }
+ break;
+ case DP_QUERY_PAYLOAD:
+ buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
+ idx++;
+ buf[idx] = (req->u.query_payload.vcpi & 0x7f);
+ idx++;
+ break;
+ case DP_REMOTE_DPCD_READ:
+ buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
+ buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
+ idx++;
+ buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
+ idx++;
+ buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
+ idx++;
+ buf[idx] = (req->u.dpcd_read.num_bytes);
+ idx++;
+ break;
+
+ case DP_REMOTE_DPCD_WRITE:
+ buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
+ buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
+ idx++;
+ buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
+ idx++;
+ buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
+ idx++;
+ buf[idx] = (req->u.dpcd_write.num_bytes);
+ idx++;
+ memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
+ idx += req->u.dpcd_write.num_bytes;
+ break;
+ case DP_REMOTE_I2C_READ:
+ buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
+ buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
+ idx++;
+ for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
+ buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
+ idx++;
+ buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
+ idx++;
+ memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
+ idx += req->u.i2c_read.transactions[i].num_bytes;
+
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
+ idx++;
+ }
+ buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
+ idx++;
+ buf[idx] = (req->u.i2c_read.num_bytes_read);
+ idx++;
+ break;
+
+ case DP_REMOTE_I2C_WRITE:
+ buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
+ idx++;
+ buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
+ idx++;
+ buf[idx] = (req->u.i2c_write.num_bytes);
+ idx++;
+ memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
+ idx += req->u.i2c_write.num_bytes;
+ break;
+ }
+ raw->cur_len = idx;
+}
+
+static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
+{
+ u8 crc4;
+ crc4 = drm_dp_msg_data_crc4(msg, len);
+ msg[len] = crc4;
+}
+
+static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
+ struct drm_dp_sideband_msg_tx *raw)
+{
+ int idx = 0;
+ u8 *buf = raw->msg;
+
+ buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
+
+ raw->cur_len = idx;
+}
+
+/* this adds a chunk of msg to the builder to get the final msg */
+static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
+ u8 *replybuf, u8 replybuflen, bool hdr)
+{
+ int ret;
+ u8 crc4;
+
+ if (hdr) {
+ u8 hdrlen;
+ struct drm_dp_sideband_msg_hdr recv_hdr;
+ ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
+ if (ret == false) {
+ print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
+ return false;
+ }
+
+ /* get length contained in this portion */
+ msg->curchunk_len = recv_hdr.msg_len;
+ msg->curchunk_hdrlen = hdrlen;
+
+ /* we have already gotten an somt - don't bother parsing */
+ if (recv_hdr.somt && msg->have_somt)
+ return false;
+
+ if (recv_hdr.somt) {
+ memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
+ msg->have_somt = true;
+ }
+ if (recv_hdr.eomt)
+ msg->have_eomt = true;
+
+ /* copy the bytes for the remainder of this header chunk */
+ msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
+ memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
+ } else {
+ memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+ msg->curchunk_idx += replybuflen;
+ }
+
+ if (msg->curchunk_idx >= msg->curchunk_len) {
+ /* do CRC */
+ crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+ /* copy chunk into bigger msg */
+ memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
+ msg->curlen += msg->curchunk_len - 1;
+ }
+ return true;
+}
+
+static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ int i;
+ memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
+ idx += 16;
+ repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ for (i = 0; i < repmsg->u.link_addr.nports; i++) {
+ if (raw->msg[idx] & 0x80)
+ repmsg->u.link_addr.ports[i].input_port = 1;
+
+ repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
+ repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
+
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
+ repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
+ if (repmsg->u.link_addr.ports[i].input_port == 0)
+ repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ if (repmsg->u.link_addr.ports[i].input_port == 0) {
+ repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
+ idx += 16;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
+ repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
+ idx++;
+
+ }
+ if (idx > raw->curlen)
+ goto fail_len;
+ }
+
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+
+ repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
+ idx++;
+ /* TODO check */
+ memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.allocate_payload.vcpi = raw->msg[idx];
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+ msg->reply_type = (raw->msg[0] & 0x80) >> 7;
+ msg->req_type = (raw->msg[0] & 0x7f);
+
+ if (msg->reply_type) {
+ memcpy(msg->u.nak.guid, &raw->msg[1], 16);
+ msg->u.nak.reason = raw->msg[17];
+ msg->u.nak.nak_data = raw->msg[18];
+ return false;
+ }
+
+ switch (msg->req_type) {
+ case DP_LINK_ADDRESS:
+ return drm_dp_sideband_parse_link_address(raw, msg);
+ case DP_QUERY_PAYLOAD:
+ return drm_dp_sideband_parse_query_payload_ack(raw, msg);
+ case DP_REMOTE_DPCD_READ:
+ return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
+ case DP_REMOTE_DPCD_WRITE:
+ return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
+ case DP_REMOTE_I2C_READ:
+ return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
+ case DP_ENUM_PATH_RESOURCES:
+ return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
+ case DP_ALLOCATE_PAYLOAD:
+ return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
+ default:
+ DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
+ return false;
+ }
+}
+
+static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
+{
+ int idx = 1;
+
+ msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
+ idx += 16;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
+ msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
+ msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
+ msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
+ msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
+ idx++;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
+{
+ int idx = 1;
+
+ msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
+ idx += 16;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
+ idx++;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+ msg->req_type = (raw->msg[0] & 0x7f);
+
+ switch (msg->req_type) {
+ case DP_CONNECTION_STATUS_NOTIFY:
+ return drm_dp_sideband_parse_connection_status_notify(raw, msg);
+ case DP_RESOURCE_STATUS_NOTIFY:
+ return drm_dp_sideband_parse_resource_status_notify(raw, msg);
+ default:
+ DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
+ return false;
+ }
+}
+
+static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_REMOTE_DPCD_WRITE;
+ req.u.dpcd_write.port_number = port_num;
+ req.u.dpcd_write.dpcd_address = offset;
+ req.u.dpcd_write.num_bytes = num_bytes;
+ req.u.dpcd_write.bytes = bytes;
+ drm_dp_encode_sideband_req(&req, msg);
+
+ return 0;
+}
+
+static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_LINK_ADDRESS;
+ drm_dp_encode_sideband_req(&req, msg);
+ return 0;
+}
+
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_ENUM_PATH_RESOURCES;
+ req.u.port_num.port_number = port_num;
+ drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
+ return 0;
+}
+
+static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
+ u8 vcpi, uint16_t pbn)
+{
+ struct drm_dp_sideband_msg_req_body req;
+ memset(&req, 0, sizeof(req));
+ req.req_type = DP_ALLOCATE_PAYLOAD;
+ req.u.allocate_payload.port_number = port_num;
+ req.u.allocate_payload.vcpi = vcpi;
+ req.u.allocate_payload.pbn = pbn;
+ drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
+ return 0;
+}
+
+static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_vcpi *vcpi)
+{
+ int ret;
+
+ mutex_lock(&mgr->payload_lock);
+ ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
+ if (ret > mgr->max_payloads) {
+ ret = -EINVAL;
+ DRM_DEBUG_KMS("out of payload ids %d\n", ret);
+ goto out_unlock;
+ }
+
+ set_bit(ret, &mgr->payload_mask);
+ vcpi->vcpi = ret;
+ mgr->proposed_vcpis[ret - 1] = vcpi;
+out_unlock:
+ mutex_unlock(&mgr->payload_lock);
+ return ret;
+}
+
+static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
+ int id)
+{
+ if (id == 0)
+ return;
+
+ mutex_lock(&mgr->payload_lock);
+ DRM_DEBUG_KMS("putting payload %d\n", id);
+ clear_bit(id, &mgr->payload_mask);
+ mgr->proposed_vcpis[id - 1] = NULL;
+ mutex_unlock(&mgr->payload_lock);
+}
+
+static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ bool ret;
+ mutex_lock(&mgr->qlock);
+ ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
+ mutex_unlock(&mgr->qlock);
+ return ret;
+}
+
+static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ int ret;
+
+ ret = wait_event_timeout(mgr->tx_waitq,
+ check_txmsg_state(mgr, txmsg),
+ (4 * HZ));
+ mutex_lock(&mstb->mgr->qlock);
+ if (ret > 0) {
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
+ ret = -EIO;
+ goto out;
+ }
+ } else {
+ DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
+
+ /* dump some state */
+ ret = -EIO;
+
+ /* remove from q */
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+ list_del(&txmsg->next);
+ }
+
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
+ mstb->tx_slots[txmsg->seqno] = NULL;
+ }
+ }
+out:
+ mutex_unlock(&mgr->qlock);
+
+ return ret;
+}
+
+static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
+{
+ struct drm_dp_mst_branch *mstb;
+
+ mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
+ if (!mstb)
+ return NULL;
+
+ mstb->lct = lct;
+ if (lct > 1)
+ memcpy(mstb->rad, rad, lct / 2);
+ INIT_LIST_HEAD(&mstb->ports);
+ kref_init(&mstb->kref);
+ return mstb;
+}
+
+static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+ struct drm_dp_mst_port *port, *tmp;
+ bool wake_tx = false;
+
+ cancel_work_sync(&mstb->mgr->work);
+
+ /*
+ * destroy all ports - don't need lock
+ * as there are no more references to the mst branch
+ * device at this point.
+ */
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_del(&port->next);
+ drm_dp_put_port(port);
+ }
+
+ /* drop any tx slots msg */
+ mutex_lock(&mstb->mgr->qlock);
+ if (mstb->tx_slots[0]) {
+ mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[0] = NULL;
+ wake_tx = true;
+ }
+ if (mstb->tx_slots[1]) {
+ mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[1] = NULL;
+ wake_tx = true;
+ }
+ mutex_unlock(&mstb->mgr->qlock);
+
+ if (wake_tx)
+ wake_up(&mstb->mgr->tx_waitq);
+ kfree(mstb);
+}
+
+static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+{
+ kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
+}
+
+
+static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
+{
+ switch (old_pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ drm_dp_put_mst_branch_device(port->mstb);
+ port->mstb = NULL;
+ break;
+ }
+}
+
+static void drm_dp_destroy_port(struct kref *kref)
+{
+ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ if (!port->input) {
+ port->vcpi.num_slots = 0;
+ if (port->connector)
+ (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+ if (!port->input && port->vcpi.vcpi > 0)
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ }
+ kfree(port);
+
+ (*mgr->cbs->hotplug)(mgr);
+}
+
+static void drm_dp_put_port(struct drm_dp_mst_port *port)
+{
+ kref_put(&port->kref, drm_dp_destroy_port);
+}
+
+static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+{
+ struct drm_dp_mst_port *port;
+ struct drm_dp_mst_branch *rmstb;
+ if (to_find == mstb) {
+ kref_get(&mstb->kref);
+ return mstb;
+ }
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->mstb) {
+ rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+ if (rmstb)
+ return rmstb;
+ }
+ }
+ return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_branch *rmstb = NULL;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+ rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
+ mutex_unlock(&mgr->lock);
+ return rmstb;
+}
+
+static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
+{
+ struct drm_dp_mst_port *port, *mport;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port == to_find) {
+ kref_get(&port->kref);
+ return port;
+ }
+ if (port->mstb) {
+ mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
+ if (mport)
+ return mport;
+ }
+ }
+ return NULL;
+}
+
+static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_port *rport = NULL;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+ rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
+ mutex_unlock(&mgr->lock);
+ return rport;
+}
+
+static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
+{
+ struct drm_dp_mst_port *port;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->port_num == port_num) {
+ kref_get(&port->kref);
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * calculate a new RAD for this MST branch device
+ * if parent has an LCT of 2 then it has 1 nibble of RAD,
+ * if parent has an LCT of 3 then it has 2 nibbles of RAD,
+ */
+static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
+ u8 *rad)
+{
+ int lct = port->parent->lct;
+ int shift = 4;
+ int idx = lct / 2;
+ if (lct > 1) {
+ memcpy(rad, port->parent->rad, idx);
+ shift = (lct % 2) ? 4 : 0;
+ } else
+ rad[0] = 0;
+
+ rad[idx] |= port->port_num << shift;
+ return lct + 1;
+}
+
+/*
+ * returns true if a link address request should be sent for the new mstb
+ */
+static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+{
+ int ret;
+ u8 rad[6], lct;
+ bool send_link = false;
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /* add i2c over sideband */
+ ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ lct = drm_dp_calculate_rad(port, rad);
+
+ port->mstb = drm_dp_add_mst_branch_device(lct, rad);
+ port->mstb->mgr = port->mgr;
+ port->mstb->port_parent = port;
+
+ send_link = true;
+ break;
+ }
+ return send_link;
+}
+
+static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ int ret;
+ if (port->dpcd_rev >= 0x12) {
+ port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
+ if (!port->guid_valid) {
+ ret = drm_dp_send_dpcd_write(mstb->mgr,
+ port,
+ DP_GUID,
+ 16, port->guid);
+ port->guid_valid = true;
+ }
+ }
+}
+
+static void build_mst_prop_path(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_branch *mstb,
+ char *proppath)
+{
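+	/*
+	 * The resulting path looks like "mst:<conn_base_id>-<port>-<port>...",
+	 * with one port number appended per hop below the topology manager's
+	 * base connector.
+	 */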
+ int i;
+ char temp[8];
+ snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
+ for (i = 0; i < (mstb->lct - 1); i++) {
+ int shift = (i % 2) ? 0 : 4;
+ int port_num = mstb->rad[i / 2] >> shift;
+ snprintf(temp, 8, "-%d", port_num);
+ strncat(proppath, temp, 255);
+ }
+ snprintf(temp, 8, "-%d", port->port_num);
+ strncat(proppath, temp, 255);
+}
+
+static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ struct device *dev,
+ struct drm_dp_link_addr_reply_port *port_msg)
+{
+ struct drm_dp_mst_port *port;
+ bool ret;
+ bool created = false;
+ int old_pdt = 0;
+ int old_ddps = 0;
+ port = drm_dp_get_port(mstb, port_msg->port_number);
+ if (!port) {
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return;
+ kref_init(&port->kref);
+ port->parent = mstb;
+ port->port_num = port_msg->port_number;
+ port->mgr = mstb->mgr;
+ port->aux.name = "DPMST";
+ port->aux.dev = dev;
+ created = true;
+ } else {
+ old_pdt = port->pdt;
+ old_ddps = port->ddps;
+ }
+
+ port->pdt = port_msg->peer_device_type;
+ port->input = port_msg->input_port;
+ port->mcs = port_msg->mcs;
+ port->ddps = port_msg->ddps;
+ port->ldps = port_msg->legacy_device_plug_status;
+ port->dpcd_rev = port_msg->dpcd_revision;
+ port->num_sdp_streams = port_msg->num_sdp_streams;
+ port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
+ memcpy(port->guid, port_msg->peer_guid, 16);
+
+ /* manage mstb port lists with mgr lock - take a reference
+ for this list */
+ if (created) {
+ mutex_lock(&mstb->mgr->lock);
+ kref_get(&port->kref);
+ list_add(&port->next, &mstb->ports);
+ mutex_unlock(&mstb->mgr->lock);
+ }
+
+ if (old_ddps != port->ddps) {
+ if (port->ddps) {
+ drm_dp_check_port_guid(mstb, port);
+ if (!port->input)
+ drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+ } else {
+ port->guid_valid = false;
+ port->available_pbn = 0;
+ }
+ }
+
+ if (old_pdt != port->pdt && !port->input) {
+ drm_dp_port_teardown_pdt(port, old_pdt);
+
+ ret = drm_dp_port_setup_pdt(port);
+ if (ret == true) {
+ drm_dp_send_link_address(mstb->mgr, port->mstb);
+ port->mstb->link_address_sent = true;
+ }
+ }
+
+ if (created && !port->input) {
+ char proppath[255];
+ build_mst_prop_path(port, mstb, proppath);
+ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+ }
+
+ /* put reference to this port */
+ drm_dp_put_port(port);
+}
+
+static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_connection_status_notify *conn_stat)
+{
+ struct drm_dp_mst_port *port;
+ int old_pdt;
+ int old_ddps;
+ bool dowork = false;
+ port = drm_dp_get_port(mstb, conn_stat->port_number);
+ if (!port)
+ return;
+
+ old_ddps = port->ddps;
+ old_pdt = port->pdt;
+ port->pdt = conn_stat->peer_device_type;
+ port->mcs = conn_stat->message_capability_status;
+ port->ldps = conn_stat->legacy_device_plug_status;
+ port->ddps = conn_stat->displayport_device_plug_status;
+
+ if (old_ddps != port->ddps) {
+ if (port->ddps) {
+ drm_dp_check_port_guid(mstb, port);
+ dowork = true;
+ } else {
+ port->guid_valid = false;
+ port->available_pbn = 0;
+ }
+ }
+ if (old_pdt != port->pdt && !port->input) {
+ drm_dp_port_teardown_pdt(port, old_pdt);
+
+ if (drm_dp_port_setup_pdt(port))
+ dowork = true;
+ }
+
+ drm_dp_put_port(port);
+ if (dowork)
+ queue_work(system_long_wq, &mstb->mgr->work);
+
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
+ u8 lct, u8 *rad)
+{
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_port *port;
+ int i;
+ /* find the port by iterating down */
+ mstb = mgr->mst_primary;
+
+ for (i = 0; i < lct - 1; i++) {
+ int shift = (i % 2) ? 0 : 4;
+ int port_num = rad[i / 2] >> shift;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->port_num == port_num) {
+ if (!port->mstb) {
+ DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
+ return NULL;
+ }
+
+ mstb = port->mstb;
+ break;
+ }
+ }
+ }
+ kref_get(&mstb->kref);
+ return mstb;
+}
+
+static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+
+ if (!mstb->link_address_sent) {
+ drm_dp_send_link_address(mgr, mstb);
+ mstb->link_address_sent = true;
+ }
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->input)
+ continue;
+
+ if (!port->ddps)
+ continue;
+
+ if (!port->available_pbn)
+ drm_dp_send_enum_path_resources(mgr, mstb, port);
+
+ if (port->mstb)
+ drm_dp_check_and_send_link_address(mgr, port->mstb);
+ }
+}
+
+static void drm_dp_mst_link_probe_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+
+ drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
+
+}
+
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+ u8 *guid)
+{
+ static u8 zero_guid[16];
+
+ if (!memcmp(guid, zero_guid, 16)) {
+ u64 salt = get_jiffies_64();
+ memcpy(&guid[0], &salt, sizeof(u64));
+ memcpy(&guid[8], &salt, sizeof(u64));
+ return false;
+ }
+ return true;
+}
+
+#if 0
+static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_REMOTE_DPCD_READ;
+ req.u.dpcd_read.port_number = port_num;
+ req.u.dpcd_read.dpcd_address = offset;
+ req.u.dpcd_read.num_bytes = num_bytes;
+ drm_dp_encode_sideband_req(&req, msg);
+
+ return 0;
+}
+#endif
+
+static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
+ bool up, u8 *msg, int len)
+{
+ int ret;
+ int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
+ int tosend, total, offset;
+ int retries = 0;
+
+retry:
+ total = len;
+ offset = 0;
+ do {
+ tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
+
+ ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret != tosend) {
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ }
+ DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
+ WARN(1, "fail\n");
+
+ return -EIO;
+ }
+ offset += tosend;
+ total -= tosend;
+ } while (total > 0);
+ return 0;
+}
+
+static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_mst_branch *mstb = txmsg->dst;
+
+ /* both msg slots are full */
+ if (txmsg->seqno == -1) {
+ if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
+ DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
+ return -EAGAIN;
+ }
+ if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
+ txmsg->seqno = mstb->last_seqno;
+ mstb->last_seqno ^= 1;
+ } else if (mstb->tx_slots[0] == NULL)
+ txmsg->seqno = 0;
+ else
+ txmsg->seqno = 1;
+ mstb->tx_slots[txmsg->seqno] = txmsg;
+ }
+ hdr->broadcast = 0;
+ hdr->path_msg = txmsg->path_msg;
+ hdr->lct = mstb->lct;
+ hdr->lcr = mstb->lct - 1;
+ if (mstb->lct > 1)
+ memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
+ hdr->seqno = txmsg->seqno;
+ return 0;
+}
+/*
+ * process a single block of the next message in the sideband queue
+ */
+static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg,
+ bool up)
+{
+ u8 chunk[48];
+ struct drm_dp_sideband_msg_hdr hdr;
+ int len, space, idx, tosend;
+ int ret;
+
+ memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
+
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
+ txmsg->seqno = -1;
+ txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
+ }
+
+ /* make hdr from dst mst - for replies use seqno
+ otherwise assign one */
+ ret = set_hdr_from_dst_qlock(&hdr, txmsg);
+ if (ret < 0)
+ return ret;
+
+ /* amount left to send in this message */
+ len = txmsg->cur_len - txmsg->cur_offset;
+
+ /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
+ space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
+
+ tosend = min(len, space);
+ if (len == txmsg->cur_len)
+ hdr.somt = 1;
+ if (space >= len)
+ hdr.eomt = 1;
+
+
+ hdr.msg_len = tosend + 1;
+ drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
+ memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
+ /* add crc at end */
+ drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
+ idx += tosend + 1;
+
+ ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
+ if (ret) {
+ DRM_DEBUG_KMS("sideband msg failed to send\n");
+ return ret;
+ }
+
+ txmsg->cur_offset += tosend;
+ if (txmsg->cur_offset == txmsg->cur_len) {
+ txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
+ return 1;
+ }
+ return 0;
+}
+
+/* must be called holding qlock */
+static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ /* construct a chunk from the first msg in the tx_msg queue */
+ if (list_empty(&mgr->tx_msg_downq)) {
+ mgr->tx_down_in_progress = false;
+ return;
+ }
+ mgr->tx_down_in_progress = true;
+
+ txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
+ ret = process_single_tx_qlock(mgr, txmsg, false);
+ if (ret == 1) {
+		/* txmsg is sent; it should be in the slots now */
+ list_del(&txmsg->next);
+ } else if (ret) {
+ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ list_del(&txmsg->next);
+ if (txmsg->seqno != -1)
+ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ wake_up(&mgr->tx_waitq);
+ }
+ if (list_empty(&mgr->tx_msg_downq)) {
+ mgr->tx_down_in_progress = false;
+ return;
+ }
+}
+
+/* called holding qlock */
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ /* construct a chunk from the first msg in the tx_msg queue */
+ if (list_empty(&mgr->tx_msg_upq)) {
+ mgr->tx_up_in_progress = false;
+ return;
+ }
+
+ txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
+ ret = process_single_tx_qlock(mgr, txmsg, true);
+ if (ret == 1) {
+		/* up txmsgs aren't put in slots - so free them after sending */
+ list_del(&txmsg->next);
+ kfree(txmsg);
+ } else if (ret)
+ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ mgr->tx_up_in_progress = true;
+}
+
+static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ mutex_lock(&mgr->qlock);
+ list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
+ if (!mgr->tx_down_in_progress)
+ process_single_down_tx_qlock(mgr);
+ mutex_unlock(&mgr->qlock);
+}
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
+{
+ int len;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ txmsg->dst = mstb;
+ len = build_link_address(txmsg);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ int i;
+
+ if (txmsg->reply.reply_type == 1)
+ DRM_DEBUG_KMS("link address nak received\n");
+ else {
+ DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
+ for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
+ txmsg->reply.u.link_addr.ports[i].input_port,
+ txmsg->reply.u.link_addr.ports[i].peer_device_type,
+ txmsg->reply.u.link_addr.ports[i].port_number,
+ txmsg->reply.u.link_addr.ports[i].dpcd_revision,
+ txmsg->reply.u.link_addr.ports[i].mcs,
+ txmsg->reply.u.link_addr.ports[i].ddps,
+ txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
+ txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
+ txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
+ }
+ for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+ drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
+ }
+ (*mgr->cbs->hotplug)(mgr);
+ }
+ } else
+ DRM_DEBUG_KMS("link address failed %d\n", ret);
+
+ kfree(txmsg);
+ return 0;
+}
+
+static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ int len;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ txmsg->dst = mstb;
+ len = build_enum_path_resources(txmsg, port->port_num);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1)
+ DRM_DEBUG_KMS("enum path resources nak received\n");
+ else {
+ if (port->port_num != txmsg->reply.u.path_resources.port_number)
+ DRM_ERROR("got incorrect port in response\n");
+ DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
+ txmsg->reply.u.path_resources.avail_payload_bw_number);
+ port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
+ }
+ }
+
+ kfree(txmsg);
+ return 0;
+}
+
+static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+ int pbn)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ int len, ret;
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EINVAL;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto fail_put;
+ }
+
+ txmsg->dst = mstb;
+ len = build_allocate_payload(txmsg, port->port_num,
+ id,
+ pbn);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1) {
+ ret = -EINVAL;
+ } else
+ ret = 0;
+ }
+ kfree(txmsg);
+fail_put:
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+
+static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write_payload(mgr, id, payload);
+ if (ret < 0) {
+ payload->payload_state = 0;
+ return ret;
+ }
+ payload->payload_state = DP_PAYLOAD_LOCAL;
+ return 0;
+}
+
+static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ int ret;
+ ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
+ if (ret < 0)
+ return ret;
+ payload->payload_state = DP_PAYLOAD_REMOTE;
+ return ret;
+}
+
+static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ DRM_DEBUG_KMS("\n");
+ /* it's okay for these to fail */
+ if (port) {
+ drm_dp_payload_send_msg(mgr, port, id, 0);
+ }
+
+ drm_dp_dpcd_write_payload(mgr, id, payload);
+ payload->payload_state = 0;
+ return 0;
+}
+
+static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ payload->payload_state = 0;
+ return 0;
+}
+
+/**
+ * drm_dp_update_payload_part1() - Execute payload update part 1
+ * @mgr: manager to use.
+ *
+ * This iterates over all proposed virtual channels, and tries to
+ * allocate space in the link for them. For 0->slots transitions,
+ * this step just writes the VCPI to the MST device. For slots->0
+ * transitions, this writes the updated VCPIs and removes the
+ * remote VC payloads.
+ *
+ * After calling this, the driver should generate ACT and payload
+ * packets.
+ */
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int i;
+ int cur_slots = 1;
+ struct drm_dp_payload req_payload;
+ struct drm_dp_mst_port *port;
+
+ mutex_lock(&mgr->payload_lock);
+ for (i = 0; i < mgr->max_payloads; i++) {
+ /* solve the current payloads - compare to the hw ones, update the hw view */
+ req_payload.start_slot = cur_slots;
+ if (mgr->proposed_vcpis[i]) {
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ } else {
+ port = NULL;
+ req_payload.num_slots = 0;
+ }
+ /* work out what is required to happen with this payload */
+ if (mgr->payloads[i].start_slot != req_payload.start_slot ||
+ mgr->payloads[i].num_slots != req_payload.num_slots) {
+
+ /* need to push an update for this payload */
+ if (req_payload.num_slots) {
+ drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
+ mgr->payloads[i].num_slots = req_payload.num_slots;
+ } else if (mgr->payloads[i].num_slots) {
+ mgr->payloads[i].num_slots = 0;
+ drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
+ req_payload.payload_state = mgr->payloads[i].payload_state;
+ } else
+ req_payload.payload_state = 0;
+
+ mgr->payloads[i].start_slot = req_payload.start_slot;
+ mgr->payloads[i].payload_state = req_payload.payload_state;
+ }
+ cur_slots += req_payload.num_slots;
+ }
+ mutex_unlock(&mgr->payload_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_update_payload_part1);
+
+/**
+ * drm_dp_update_payload_part2() - Execute payload update part 2
+ * @mgr: manager to use.
+ *
+ * This iterates over all proposed virtual channels, and tries to
+ * allocate space in the link for them. For 0->slots transitions,
+ * this step writes the remote VC payload commands. For slots->0
+ * transitions, this just resets some internal state.
+ */
+int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_mst_port *port;
+ int i;
+ int ret = 0;
+ mutex_lock(&mgr->payload_lock);
+ for (i = 0; i < mgr->max_payloads; i++) {
+
+ if (!mgr->proposed_vcpis[i])
+ continue;
+
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+
+ DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
+ if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
+ ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
+ } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
+ ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
+ }
+ if (ret) {
+ mutex_unlock(&mgr->payload_lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&mgr->payload_lock);
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_update_payload_part2);
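+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: the typical driver-side ordering of the two payload
+ * update steps around ACT.  example_trigger_act() is a hypothetical driver
+ * helper - how ACT is generated is hardware specific.
+ */
+static int example_update_payloads(struct drm_dp_mst_topology_mgr *mgr)
+{
+        int ret;
+
+        /* step 1: write the VCPI allocations to the DPCD payload table */
+        ret = drm_dp_update_payload_part1(mgr);
+        if (ret)
+                return ret;
+
+        /* the driver generates ACT, then waits for the sink to ack it */
+        example_trigger_act();
+        ret = drm_dp_check_act_status(mgr);
+        if (ret)
+                return ret;
+
+        /* step 2: send the remote ALLOCATE_PAYLOAD sideband messages */
+        return drm_dp_update_payload_part2(mgr);
+}
+#endif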
+
+#if 0 /* unused as of yet */
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size)
+{
+ int len;
+ struct drm_dp_sideband_msg_tx *txmsg;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ len = build_dpcd_read(txmsg, port->port_num, 0, 8);
+ txmsg->dst = port->parent;
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ return 0;
+}
+#endif
+
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes)
+{
+ int len;
+ int ret;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EINVAL;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto fail_put;
+ }
+
+ len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+ txmsg->dst = mstb;
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1) {
+ ret = -EINVAL;
+ } else
+ ret = 0;
+ }
+ kfree(txmsg);
+fail_put:
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+
+static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
+{
+ struct drm_dp_sideband_msg_reply_body reply;
+
+ reply.reply_type = 1;
+ reply.req_type = req_type;
+ drm_dp_encode_sideband_reply(&reply, msg);
+ return 0;
+}
+
+static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ int req_type, int seqno, bool broadcast)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ txmsg->dst = mstb;
+ txmsg->seqno = seqno;
+ drm_dp_encode_up_ack_reply(txmsg, req_type);
+
+ mutex_lock(&mgr->qlock);
+ list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
+ if (!mgr->tx_up_in_progress) {
+ process_single_up_tx_qlock(mgr);
+ }
+ mutex_unlock(&mgr->qlock);
+ return 0;
+}
+
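+/*
+ * PBN capacity of one VC time slot for a given link rate and lane count:
+ * e.g. DP_LINK_BW_5_4 with 4 lanes gives 10 * 4 = 40 PBN per slot, so the
+ * 2560 PBN total used below corresponds to 64 time slots.
+ */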
+static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
+{
+ switch (dp_link_bw) {
+ case DP_LINK_BW_1_62:
+ return 3 * dp_link_count;
+ case DP_LINK_BW_2_7:
+ return 5 * dp_link_count;
+ case DP_LINK_BW_5_4:
+ return 10 * dp_link_count;
+ }
+ return 0;
+}
+
+/**
+ * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
+ * @mgr: manager to set state for
+ * @mst_state: true to enable MST on this connector - false to disable.
+ *
+ * This is called by the driver when it detects an MST capable device plugged
+ * into a DP MST capable port, or when a DP MST capable device is unplugged.
+ */
+int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
+{
+ int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
+ mutex_lock(&mgr->lock);
+ if (mst_state == mgr->mst_state)
+ goto out_unlock;
+
+ mgr->mst_state = mst_state;
+ /* set the device into MST mode */
+ if (mst_state) {
+ WARN_ON(mgr->mst_primary);
+
+ /* get dpcd info */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (ret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("failed to read DPCD\n");
+ goto out_unlock;
+ }
+
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ mgr->total_pbn = 2560;
+ mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
+ mgr->avail_slots = mgr->total_slots;
+
+ /* add initial branch device at LCT 1 */
+ mstb = drm_dp_add_mst_branch_device(1, NULL);
+ if (mstb == NULL) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ mstb->mgr = mgr;
+
+ /* give this the main reference */
+ mgr->mst_primary = mstb;
+ kref_get(&mgr->mst_primary->kref);
+
+ {
+ struct drm_dp_payload reset_pay;
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ goto out_unlock;
+ }
+
+
+ /* sort out guid */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
+ if (ret != 16) {
+ DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
+ goto out_unlock;
+ }
+
+ mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
+ if (!mgr->guid_valid) {
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
+ mgr->guid_valid = true;
+ }
+
+ queue_work(system_long_wq, &mgr->work);
+
+ ret = 0;
+ } else {
+ /* disable MST on the device */
+ mstb = mgr->mst_primary;
+ mgr->mst_primary = NULL;
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
+ memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ mgr->payload_mask = 0;
+ set_bit(0, &mgr->payload_mask);
+ }
+
+out_unlock:
+ mutex_unlock(&mgr->lock);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
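+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: flipping MST state from a driver's long-pulse hotplug
+ * path.  example_sink_is_mst() is a hypothetical stand-in for the driver's
+ * own DPCD MSTM_CAP check.
+ */
+static void example_handle_long_hpd(struct drm_dp_mst_topology_mgr *mgr)
+{
+        bool mst = example_sink_is_mst(mgr->aux);
+
+        /* enables or disables MST mode and kicks off a topology probe */
+        if (drm_dp_mst_topology_mgr_set_mst(mgr, mst) < 0)
+                DRM_DEBUG_KMS("failed to update MST state\n");
+}
+#endif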
+
+/**
+ * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
+ * @mgr: manager to suspend
+ *
+ * This function tells the MST device that we can't handle UP messages
+ * anymore. This should stop it from sending any since we are suspended.
+ */
+void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ mutex_unlock(&mgr->lock);
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
+
+/**
+ * drm_dp_mst_topology_mgr_resume() - resume the MST manager
+ * @mgr: manager to resume
+ *
+ * This will fetch the DPCD and check whether the device is still there;
+ * if it is, it rewrites the MSTM control bits and returns.
+ *
+ * If the device has gone away, this returns -1 and the driver should do
+ * a full MST reprobe, in case we were undocked.
+ */
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret = 0;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary) {
+ int sret;
+ sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (sret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ ret = 0;
+ } else
+ ret = -1;
+
+out_unlock:
+ mutex_unlock(&mgr->lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
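+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: suspend/resume hooks for a driver using the topology
+ * manager.  example_full_mst_reprobe() is a hypothetical driver helper.
+ */
+static void example_mst_suspend(struct drm_dp_mst_topology_mgr *mgr)
+{
+        /* stop the sink from sending UP requests while we are asleep */
+        drm_dp_mst_topology_mgr_suspend(mgr);
+}
+
+static void example_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
+{
+        /* a negative return means the device vanished, e.g. we were undocked */
+        if (drm_dp_mst_topology_mgr_resume(mgr) < 0)
+                example_full_mst_reprobe(mgr);
+}
+#endif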
+
+static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+{
+ int len;
+ u8 replyblock[32];
+ int replylen, origlen, curreply;
+ int ret;
+ struct drm_dp_sideband_msg_rx *msg;
+ int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
+ msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+
+ len = min(mgr->max_dpcd_transaction_bytes, 16);
+ ret = drm_dp_dpcd_read(mgr->aux, basereg,
+ replyblock, len);
+ if (ret != len) {
+ DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
+ return;
+ }
+ ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+ if (!ret) {
+ DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
+ return;
+ }
+ replylen = msg->curchunk_len + msg->curchunk_hdrlen;
+
+ origlen = replylen;
+ replylen -= len;
+ curreply = len;
+ while (replylen > 0) {
+ len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
+ ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret != len) {
+ DRM_DEBUG_KMS("failed to read a chunk\n");
+ }
+ ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+ if (ret == false)
+ DRM_DEBUG_KMS("failed to build sideband msg\n");
+ curreply += len;
+ replylen -= len;
+ }
+}
+
+static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret = 0;
+
+ drm_dp_get_one_sb_msg(mgr, false);
+
+ if (mgr->down_rep_recv.have_eomt) {
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ int slot = -1;
+ mstb = drm_dp_get_mst_branch_device(mgr,
+ mgr->down_rep_recv.initial_hdr.lct,
+ mgr->down_rep_recv.initial_hdr.rad);
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
+ /* find the message */
+ slot = mgr->down_rep_recv.initial_hdr.seqno;
+ mutex_lock(&mgr->qlock);
+ txmsg = mstb->tx_slots[slot];
+ /* remove from slots */
+ mutex_unlock(&mgr->qlock);
+
+ if (!txmsg) {
+ DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
+ mstb,
+ mgr->down_rep_recv.initial_hdr.seqno,
+ mgr->down_rep_recv.initial_hdr.lct,
+ mgr->down_rep_recv.initial_hdr.rad[0],
+ mgr->down_rep_recv.msg[0]);
+ drm_dp_put_mst_branch_device(mstb);
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
+ drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ if (txmsg->reply.reply_type == 1) {
+ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
+ }
+
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ drm_dp_put_mst_branch_device(mstb);
+
+ mutex_lock(&mgr->qlock);
+ txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+ mstb->tx_slots[slot] = NULL;
+ mutex_unlock(&mgr->qlock);
+
+ wake_up(&mgr->tx_waitq);
+ }
+ return ret;
+}
+
+static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret = 0;
+ drm_dp_get_one_sb_msg(mgr, true);
+
+ if (mgr->up_req_recv.have_eomt) {
+ struct drm_dp_sideband_msg_req_body msg;
+ struct drm_dp_mst_branch *mstb;
+ bool seqno;
+ mstb = drm_dp_get_mst_branch_device(mgr,
+ mgr->up_req_recv.initial_hdr.lct,
+ mgr->up_req_recv.initial_hdr.rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
+ seqno = mgr->up_req_recv.initial_hdr.seqno;
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+
+ if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ drm_dp_update_port(mstb, &msg.u.conn_stat);
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+ (*mgr->cbs->hotplug)(mgr);
+
+ } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
+ }
+
+ drm_dp_put_mst_branch_device(mstb);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ }
+ return ret;
+}
+
+/**
+ * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
+ * @mgr: manager to notify irq for.
+ * @esi: 4 bytes from SINK_COUNT_ESI
+ *
+ * This should be called from the driver when it detects a short IRQ,
+ * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
+ * topology manager will process the sideband messages received as a result
+ * of this.
+ */
+int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
+{
+ int ret = 0;
+ int sc;
+ *handled = false;
+ sc = esi[0] & 0x3f;
+
+ if (sc != mgr->sink_count) {
+ mgr->sink_count = sc;
+ *handled = true;
+ }
+
+ if (esi[1] & DP_DOWN_REP_MSG_RDY) {
+ ret = drm_dp_mst_handle_down_rep(mgr);
+ *handled = true;
+ }
+
+ if (esi[1] & DP_UP_REQ_MSG_RDY) {
+ ret |= drm_dp_mst_handle_up_req(mgr);
+ *handled = true;
+ }
+
+ drm_dp_mst_kick_tx(mgr);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
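+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: a short-pulse IRQ handler feeding the ESI vector to the
+ * topology manager.  Reading 4 bytes from DP_SINK_COUNT_ESI and the
+ * example_ack_esi() helper are assumptions - ESI handling is driver specific.
+ */
+static void example_handle_short_hpd(struct drm_dp_mst_topology_mgr *mgr)
+{
+        u8 esi[4];
+        bool handled;
+
+        if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
+                return;
+
+        drm_dp_mst_hpd_irq(mgr, esi, &handled);
+        if (handled)
+                example_ack_esi(mgr->aux, esi);
+}
+#endif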
+
+/**
+ * drm_dp_mst_detect_port() - get connection status for an MST port
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port
+ *
+ * This returns the current connection state for a port. It validates that
+ * the port pointer still exists, so the caller doesn't require a reference.
+ */
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ enum drm_connector_status status = connector_status_disconnected;
+
+ /* we need to search for the port in the mgr in case it's gone */
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return connector_status_disconnected;
+
+ if (!port->ddps)
+ goto out;
+
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_NONE:
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ break;
+
+ case DP_PEER_DEVICE_SST_SINK:
+ status = connector_status_connected;
+ break;
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ if (port->ldps)
+ status = connector_status_connected;
+ break;
+ }
+out:
+ drm_dp_put_port(port);
+ return status;
+}
+EXPORT_SYMBOL(drm_dp_mst_detect_port);
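+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: an MST connector's ->detect() callback.  The
+ * example_mst_connector structure and its to_example_mst_connector()
+ * accessor are hypothetical; they just carry the per-connector port and
+ * manager pointers a driver would keep.
+ */
+static enum drm_connector_status
+example_mst_detect(struct drm_connector *connector, bool force)
+{
+        struct example_mst_connector *c = to_example_mst_connector(connector);
+
+        /* validates the unverified port pointer internally */
+        return drm_dp_mst_detect_port(c->mgr, c->port);
+}
+#endif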
+
+/**
+ * drm_dp_mst_get_edid() - get EDID for an MST port
+ * @connector: toplevel connector to get EDID for
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This returns an EDID for the port connected to a connector. It validates
+ * that the pointer still exists, so the caller doesn't require a reference.
+ */
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ struct edid *edid = NULL;
+
+ /* we need to search for the port in the mgr in case it's gone */
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return NULL;
+
+ edid = drm_get_edid(connector, &port->aux.ddc);
+ drm_dp_put_port(port);
+ return edid;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_edid);
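+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: an MST connector's ->get_modes() callback.  The
+ * example_mst_connector structure is hypothetical; the EDID is handed back
+ * to the core with the usual connector helpers.
+ */
+static int example_mst_get_modes(struct drm_connector *connector)
+{
+        struct example_mst_connector *c = to_example_mst_connector(connector);
+        struct edid *edid;
+        int count;
+
+        edid = drm_dp_mst_get_edid(connector, c->mgr, c->port);
+        if (!edid)
+                return 0;
+
+        drm_mode_connector_update_edid_property(connector, edid);
+        count = drm_add_edid_modes(connector, edid);
+        kfree(edid);
+        return count;
+}
+#endif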
+
+/**
+ * drm_dp_find_vcpi_slots() - find slots for this PBN value
+ * @mgr: manager to use
+ * @pbn: payload bandwidth to convert into slots.
+ */
+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
+ int pbn)
+{
+ int num_slots;
+
+ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+ if (num_slots > mgr->avail_slots)
+ return -ENOSPC;
+ return num_slots;
+}
+EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
+
+static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_vcpi *vcpi, int pbn)
+{
+ int num_slots;
+ int ret;
+
+ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+ if (num_slots > mgr->avail_slots)
+ return -ENOSPC;
+
+ vcpi->pbn = pbn;
+ vcpi->aligned_pbn = num_slots * mgr->pbn_div;
+ vcpi->num_slots = num_slots;
+
+ ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
+ * @mgr: manager for this port
+ * @port: port to allocate a virtual channel for.
+ * @pbn: payload bandwidth number to request
+ * @slots: returned number of slots for this PBN.
+ */
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
+{
+ int ret;
+
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return false;
+
+ if (port->vcpi.vcpi > 0) {
+ DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ if (pbn == port->vcpi.pbn) {
+ *slots = port->vcpi.num_slots;
+ drm_dp_put_port(port);
+ return true;
+ }
+ }
+
+ ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
+ goto out;
+ }
+ DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
+ *slots = port->vcpi.num_slots;
+
+ drm_dp_put_port(port);
+ return true;
+out:
+ /* drop the reference taken above so the port isn't leaked on error */
+ drm_dp_put_port(port);
+ return false;
+}
+EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
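+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: the usual modeset-time VCPI flow - convert the mode to
+ * a PBN value, check that it fits, then allocate a virtual channel for the
+ * port.  The 24 bpp value and the error handling are simplified.
+ */
+static int example_allocate_stream(struct drm_dp_mst_topology_mgr *mgr,
+                                   struct drm_dp_mst_port *port,
+                                   struct drm_display_mode *mode)
+{
+        int pbn, slots;
+
+        pbn = drm_dp_calc_pbn_mode(mode->clock, 24);
+
+        slots = drm_dp_find_vcpi_slots(mgr, pbn);
+        if (slots < 0)
+                return slots;
+
+        if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
+                return -EINVAL;
+
+        /* slots is now the number of timeslots to program for this stream */
+        return slots;
+}
+#endif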
+
+/**
+ * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This just resets the number of slots for the port's VCPI for later programming.
+ */
+void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return;
+ port->vcpi.num_slots = 0;
+ drm_dp_put_port(port);
+}
+EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
+
+/**
+ * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
+ * @mgr: manager for this port
+ * @port: unverified port to deallocate vcpi for
+ */
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return;
+
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ port->vcpi.num_slots = 0;
+ port->vcpi.pbn = 0;
+ port->vcpi.aligned_pbn = 0;
+ port->vcpi.vcpi = 0;
+ drm_dp_put_port(port);
+}
+EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
+
+static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+ int id, struct drm_dp_payload *payload)
+{
+ u8 payload_alloc[3], status;
+ int ret;
+ int retries = 0;
+
+ drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
+
+ payload_alloc[0] = id;
+ payload_alloc[1] = payload->start_slot;
+ payload_alloc[2] = payload->num_slots;
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret != 3) {
+ DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
+ goto fail;
+ }
+
+retry:
+ ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
+ retries++;
+ if (retries < 20) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+
+
+/**
+ * drm_dp_check_act_status() - Check ACT handled status.
+ * @mgr: manager to use
+ *
+ * Check the payload status bits in the DPCD for ACT handled completion.
+ */
+int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+ u8 status;
+ int ret;
+ int count = 0;
+
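+ /* poll up to 30 times, 100us apart (~3ms), for DP_PAYLOAD_ACT_HANDLED */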
+ do {
+ ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (status & DP_PAYLOAD_ACT_HANDLED)
+ break;
+ count++;
+ udelay(100);
+
+ } while (count < 30);
+
+ if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
+ DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
+ ret = -EINVAL;
+ goto fail;
+ }
+ return 0;
+fail:
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_check_act_status);
+
+/**
+ * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
+ * @clock: dot clock for the mode
+ * @bpp: bpp for the mode.
+ *
+ * This uses the formula in the spec to calculate the PBN value for a mode.
+ */
+int drm_dp_calc_pbn_mode(int clock, int bpp)
+{
+ fixed20_12 pix_bw;
+ fixed20_12 fbpp;
+ fixed20_12 result;
+ fixed20_12 margin, tmp;
+ u32 res;
+
+ pix_bw.full = dfixed_const(clock);
+ fbpp.full = dfixed_const(bpp);
+ tmp.full = dfixed_const(8);
+ fbpp.full = dfixed_div(fbpp, tmp);
+
+ result.full = dfixed_mul(pix_bw, fbpp);
+ margin.full = dfixed_const(54);
+ tmp.full = dfixed_const(64);
+ margin.full = dfixed_div(margin, tmp);
+ result.full = dfixed_div(result, margin);
+
+ margin.full = dfixed_const(1006);
+ tmp.full = dfixed_const(1000);
+ margin.full = dfixed_div(margin, tmp);
+ result.full = dfixed_mul(result, margin);
+
+ result.full = dfixed_div(result, tmp);
+ result.full = dfixed_ceil(result);
+ res = dfixed_trunc(result);
+ return res;
+}
+EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
+
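+/*
+ * Sanity check drm_dp_calc_pbn_mode() against known values: the formula
+ * above works out to PBN = ceil(clock_kHz * bpp/8 * 64/54 * 1.006 / 1000),
+ * so 154000 kHz at 30 bpp gives ceil(688.6) = 689 and 234000 kHz gives 1047.
+ */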
+static int test_calc_pbn_mode(void)
+{
+ int ret;
+ ret = drm_dp_calc_pbn_mode(154000, 30);
+ if (ret != 689)
+ return -EINVAL;
+ ret = drm_dp_calc_pbn_mode(234000, 30);
+ if (ret != 1047)
+ return -EINVAL;
+ return 0;
+}
+
+/* we want to kick the TX after we've ACKed the up/down IRQs. */
+static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
+{
+ queue_work(system_long_wq, &mgr->tx_work);
+}
+
+static void drm_dp_mst_dump_mstb(struct seq_file *m,
+ struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+ int tabs = mstb->lct;
+ char prefix[10];
+ int i;
+
+ for (i = 0; i < tabs; i++)
+ prefix[i] = '\t';
+ prefix[i] = '\0';
+
+ seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
+ list_for_each_entry(port, &mstb->ports, next) {
+ seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
+ if (port->mstb)
+ drm_dp_mst_dump_mstb(m, port->mstb);
+ }
+}
+
+static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+ char *buf)
+{
+ int ret;
+ int i;
+ for (i = 0; i < 4; i++) {
+ ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
+ if (ret != 16)
+ break;
+ }
+ if (i == 4)
+ return true;
+ return false;
+}
+
+/**
+ * drm_dp_mst_dump_topology() - dump topology to seq file
+ * @m: seq_file to dump output to
+ * @mgr: manager to dump current topology for.
+ *
+ * Helper to dump the MST topology to a seq file for debugfs.
+ */
+void drm_dp_mst_dump_topology(struct seq_file *m,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ int i;
+ struct drm_dp_mst_port *port;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+ drm_dp_mst_dump_mstb(m, mgr->mst_primary);
+
+ /* dump VCPIs */
+ mutex_unlock(&mgr->lock);
+
+ mutex_lock(&mgr->payload_lock);
+ seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);
+
+ for (i = 0; i < mgr->max_payloads; i++) {
+ if (mgr->proposed_vcpis[i]) {
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
+ } else
+ seq_printf(m, "vcpi %d:unsed\n", i);
+ }
+ for (i = 0; i < mgr->max_payloads; i++) {
+ seq_printf(m, "payload %d: %d, %d, %d\n",
+ i,
+ mgr->payloads[i].payload_state,
+ mgr->payloads[i].start_slot,
+ mgr->payloads[i].num_slots);
+
+
+ }
+ mutex_unlock(&mgr->payload_lock);
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary) {
+ u8 buf[64];
+ bool bret;
+ int ret;
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
+ seq_printf(m, "dpcd: ");
+ for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+ ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
+ seq_printf(m, "faux/mst: ");
+ for (i = 0; i < 2; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+ ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ seq_printf(m, "mst ctrl: ");
+ for (i = 0; i < 1; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+
+ bret = dump_dp_payload_table(mgr, buf);
+ if (bret == true) {
+ seq_printf(m, "payload table: ");
+ for (i = 0; i < 63; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+ }
+
+ }
+
+ mutex_unlock(&mgr->lock);
+
+}
+EXPORT_SYMBOL(drm_dp_mst_dump_topology);
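+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: a debugfs "show" callback a driver could wire up to
+ * expose the topology; example_mgr_from_seq() is a hypothetical way of
+ * getting at the driver's manager from the seq_file.
+ */
+static int example_mst_topology_show(struct seq_file *m, void *data)
+{
+        struct drm_dp_mst_topology_mgr *mgr = example_mgr_from_seq(m);
+
+        drm_dp_mst_dump_topology(m, mgr);
+        return 0;
+}
+#endif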
+
+static void drm_dp_tx_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
+
+ mutex_lock(&mgr->qlock);
+ if (mgr->tx_down_in_progress)
+ process_single_down_tx_qlock(mgr);
+ mutex_unlock(&mgr->qlock);
+}
+
+/**
+ * drm_dp_mst_topology_mgr_init - initialise a topology manager
+ * @mgr: manager struct to initialise
+ * @dev: device providing this structure - for i2c addition.
+ * @aux: DP helper aux channel to talk to this device
+ * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
+ * @max_payloads: maximum number of payloads this GPU can source
+ * @conn_base_id: the connector object ID the MST device is connected to.
+ *
+ * Return 0 for success, or negative error code on failure
+ */
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ struct device *dev, struct drm_dp_aux *aux,
+ int max_dpcd_transaction_bytes,
+ int max_payloads, int conn_base_id)
+{
+ mutex_init(&mgr->lock);
+ mutex_init(&mgr->qlock);
+ mutex_init(&mgr->payload_lock);
+ INIT_LIST_HEAD(&mgr->tx_msg_upq);
+ INIT_LIST_HEAD(&mgr->tx_msg_downq);
+ INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+ INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
+ init_waitqueue_head(&mgr->tx_waitq);
+ mgr->dev = dev;
+ mgr->aux = aux;
+ mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
+ mgr->max_payloads = max_payloads;
+ mgr->conn_base_id = conn_base_id;
+ mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
+ if (!mgr->payloads)
+ return -ENOMEM;
+ mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
+ if (!mgr->proposed_vcpis)
+ return -ENOMEM;
+ set_bit(0, &mgr->payload_mask);
+ test_calc_pbn_mode();
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
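+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: initialising the manager at driver load.  The embedding
+ * example_dp structure, the 16-byte DPCD transaction limit, the 4 payloads
+ * and the callback wiring (mgr->cbs providing ->hotplug()) are assumptions
+ * shown for illustration only.
+ */
+static int example_mst_init(struct example_dp *dp, struct drm_device *drm_dev)
+{
+        dp->mst_mgr.cbs = &example_mst_cbs;
+
+        return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, drm_dev->dev,
+                                            &dp->aux, 16, 4,
+                                            dp->connector.base.id);
+}
+#endif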
+
+/**
+ * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
+ * @mgr: manager to destroy
+ */
+void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->payload_lock);
+ kfree(mgr->payloads);
+ mgr->payloads = NULL;
+ kfree(mgr->proposed_vcpis);
+ mgr->proposed_vcpis = NULL;
+ mutex_unlock(&mgr->payload_lock);
+ mgr->dev = NULL;
+ mgr->aux = NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
+
+/* I2C device */
+static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ struct drm_dp_aux *aux = adapter->algo_data;
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ unsigned int i;
+ bool reading = false;
+ struct drm_dp_sideband_msg_req_body msg;
+ struct drm_dp_sideband_msg_tx *txmsg = NULL;
+ int ret;
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EREMOTEIO;
+
+ /* construct i2c msg */
+ /* see if last msg is a read */
+ if (msgs[num - 1].flags & I2C_M_RD)
+ reading = true;
+
+ if (!reading) {
+ DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ msg.req_type = DP_REMOTE_I2C_READ;
+ msg.u.i2c_read.num_transactions = num - 1;
+ msg.u.i2c_read.port_number = port->port_num;
+ for (i = 0; i < num - 1; i++) {
+ msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
+ msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
+ msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
+ }
+ msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
+ msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ txmsg->dst = mstb;
+ drm_dp_encode_sideband_req(&msg, txmsg);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+
+ if (txmsg->reply.reply_type == 1) { /* got a NAK back */
+ ret = -EREMOTEIO;
+ goto out;
+ }
+ if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
+ ret = -EIO;
+ goto out;
+ }
+ memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
+ ret = num;
+ }
+out:
+ kfree(txmsg);
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+
+static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
+ .functionality = drm_dp_mst_i2c_functionality,
+ .master_xfer = drm_dp_mst_i2c_xfer,
+};
+
+/**
+ * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
+{
+ aux->ddc.algo = &drm_dp_mst_i2c_algo;
+ aux->ddc.algo_data = aux;
+ aux->ddc.retries = 3;
+
+ aux->ddc.class = I2C_CLASS_DDC;
+ aux->ddc.owner = THIS_MODULE;
+ aux->ddc.dev.parent = aux->dev;
+ aux->ddc.dev.of_node = aux->dev->of_node;
+
+ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
+ sizeof(aux->ddc.name));
+
+ return i2c_add_adapter(&aux->ddc);
+}
+
+/**
+ * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
+ * @aux: DisplayPort AUX channel
+ */
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
+{
+ i2c_del_adapter(&aux->ddc);
+}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8218078b6133..3242e208c0d0 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -1,31 +1,11 @@
-/**
- * \file drm_drv.c
- * Generic driver template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * To use this template, you must at least define the following (samples
- * given for the MGA driver):
- *
- * \code
- * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
- *
- * #define DRIVER_NAME "mga"
- * #define DRIVER_DESC "Matrox G200/G400"
- * #define DRIVER_DATE "20001127"
- *
- * #define drm_x mga_##x
- * \endcode
- */
-
/*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -40,432 +20,906 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mount.h>
#include <linux/slab.h>
-#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include "drm_legacy.h"
+unsigned int drm_debug = 0; /* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
-
-/** Ioctl table */
-static const struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if __OS_HAS_AGP
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-};
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
-/** File operations structure */
-static const struct file_operations drm_stub_fops = {
- .owner = THIS_MODULE,
- .open = drm_stub_open,
- .llseek = noop_llseek,
-};
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
-static int __init drm_core_init(void)
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
+
+module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
+
+static DEFINE_SPINLOCK(drm_minor_lock);
+static struct idr drm_minors_idr;
+
+struct class *drm_class;
+static struct dentry *drm_debugfs_root;
+
+int drm_err(const char *func, const char *format, ...)
{
- int ret = -ENOMEM;
+ struct va_format vaf;
+ va_list args;
+ int r;
- drm_global_init();
- drm_connector_ida_init();
- idr_init(&drm_minors_idr);
+ va_start(args, format);
- if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
- goto err_p1;
+ vaf.fmt = format;
+ vaf.va = &args;
- drm_class = drm_sysfs_create(THIS_MODULE, "drm");
- if (IS_ERR(drm_class)) {
- printk(KERN_ERR "DRM: Error creating drm class.\n");
- ret = PTR_ERR(drm_class);
- goto err_p2;
+ r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(drm_err);
+
+void drm_ut_debug_printk(const char *function_name, const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_ut_debug_printk);
+
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+ struct drm_master *master;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return NULL;
+
+ kref_init(&master->refcount);
+ spin_lock_init(&master->lock.spinlock);
+ init_waitqueue_head(&master->lock.lock_queue);
+ if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
+ kfree(master);
+ return NULL;
}
+ INIT_LIST_HEAD(&master->magicfree);
+ master->minor = minor;
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
- if (!drm_debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
- ret = -1;
- goto err_p3;
+ return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+ kref_get(&master->refcount);
+ return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+ struct drm_master *master = container_of(kref, struct drm_master, refcount);
+ struct drm_magic_entry *pt, *next;
+ struct drm_device *dev = master->minor->dev;
+ struct drm_map_list *r_list, *list_temp;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_destroy)
+ dev->driver->master_destroy(dev, master);
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+ if (r_list->master == master) {
+ drm_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
+ }
}
- DRM_INFO("Initialized %s %d.%d.%d %s\n",
- CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
- return 0;
-err_p3:
- drm_sysfs_destroy();
-err_p2:
- unregister_chrdev(DRM_MAJOR, "drm");
+ if (master->unique) {
+ kfree(master->unique);
+ master->unique = NULL;
+ master->unique_len = 0;
+ }
- idr_destroy(&drm_minors_idr);
-err_p1:
- return ret;
+ list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+ list_del(&pt->head);
+ drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+ kfree(pt);
+ }
+
+ drm_ht_remove(&master->magiclist);
+
+ mutex_unlock(&dev->struct_mutex);
+ kfree(master);
}
-static void __exit drm_core_exit(void)
+void drm_master_put(struct drm_master **master)
{
- debugfs_remove(drm_debugfs_root);
- drm_sysfs_destroy();
+ kref_put(&(*master)->refcount, drm_master_destroy);
+ *master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
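+
+#if 0	/* illustrative example, not built */
+/*
+ * Usage sketch only: holding an extra reference on a file's master across
+ * some driver work and dropping it again.
+ */
+static void example_with_master(struct drm_file *file_priv)
+{
+        struct drm_master *master = drm_master_get(file_priv->master);
+
+        /* ... use master->lock, master->unique, etc. ... */
+
+        drm_master_put(&master);        /* also clears the local pointer */
+}
+#endif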
- unregister_chrdev(DRM_MAJOR, "drm");
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = 0;
- drm_connector_ida_destroy();
- idr_destroy(&drm_minors_idr);
+ mutex_lock(&dev->master_mutex);
+ if (file_priv->is_master)
+ goto out_unlock;
+
+ if (file_priv->minor->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!file_priv->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
}
-module_init(drm_core_init);
-module_exit(drm_core_exit);
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = -EINVAL;
-/**
- * Copy and IOCTL return string to user space
+ mutex_lock(&dev->master_mutex);
+ if (!file_priv->is_master)
+ goto out_unlock;
+
+ if (!file_priv->minor->master)
+ goto out_unlock;
+
+ ret = 0;
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, false);
+ drm_master_put(&file_priv->minor->master);
+ file_priv->is_master = 0;
+
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
+}
+
+/*
+ * DRM Minors
+ * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
+ * of them is represented by a drm_minor object. Depending on the capabilities
+ * of the device-driver, different interfaces are registered.
+ *
+ * Minors can be accessed via dev->$minor_name. This pointer is either
+ * NULL or a valid drm_minor pointer and stays valid as long as the device is
+ * valid. This means DRM minors have the same lifetime as the underlying
+ * device. However, this doesn't mean that the minor is active. Minors are
+ * registered and unregistered dynamically according to device-state.
*/
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+
+static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
+ unsigned int type)
{
- int len;
+ switch (type) {
+ case DRM_MINOR_LEGACY:
+ return &dev->primary;
+ case DRM_MINOR_RENDER:
+ return &dev->render;
+ case DRM_MINOR_CONTROL:
+ return &dev->control;
+ default:
+ return NULL;
+ }
+}
- /* don't overflow userbuf */
- len = strlen(value);
- if (len > *buf_len)
- len = *buf_len;
+static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+ int r;
+
+ minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+ if (!minor)
+ return -ENOMEM;
+
+ minor->type = type;
+ minor->dev = dev;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ r = idr_alloc(&drm_minors_idr,
+ NULL,
+ 64 * type,
+ 64 * (type + 1),
+ GFP_NOWAIT);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+ idr_preload_end();
+
+ if (r < 0)
+ goto err_free;
+
+ minor->index = r;
+
+ minor->kdev = drm_sysfs_minor_alloc(minor);
+ if (IS_ERR(minor->kdev)) {
+ r = PTR_ERR(minor->kdev);
+ goto err_index;
+ }
- /* let userspace know exact length of driver value (which could be
- * larger than the userspace-supplied buffer) */
- *buf_len = strlen(value);
+ *drm_minor_get_slot(dev, type) = minor;
+ return 0;
+
+err_index:
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+err_free:
+ kfree(minor);
+ return r;
+}
+
+static void drm_minor_free(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor **slot, *minor;
+ unsigned long flags;
+
+ slot = drm_minor_get_slot(dev, type);
+ minor = *slot;
+ if (!minor)
+ return;
+
+ drm_mode_group_destroy(&minor->mode_group);
+ put_device(minor->kdev);
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ kfree(minor);
+ *slot = NULL;
+}
- /* finally, try filling in the userbuf */
- if (len && buf)
- if (copy_to_user(buf, value, len))
- return -EFAULT;
+static int drm_minor_register(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ minor = *drm_minor_get_slot(dev, type);
+ if (!minor)
+ return 0;
+
+ ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
+ if (ret) {
+ DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+ return ret;
+ }
+
+ ret = device_add(minor->kdev);
+ if (ret)
+ goto err_debugfs;
+
+ /* replace NULL with @minor so lookups will succeed from now on */
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_replace(&drm_minors_idr, minor, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ DRM_DEBUG("new minor registered %d\n", minor->index);
return 0;
+
+err_debugfs:
+ drm_debugfs_cleanup(minor);
+ return ret;
+}
+
+static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ minor = *drm_minor_get_slot(dev, type);
+ if (!minor || !device_is_registered(minor->kdev))
+ return;
+
+ /* replace @minor with NULL so lookups will fail from now on */
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_replace(&drm_minors_idr, NULL, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ device_del(minor->kdev);
+ dev_set_drvdata(minor->kdev, NULL); /* safety belt */
+ drm_debugfs_cleanup(minor);
}
/**
- * Get version information
+ * drm_minor_acquire - Acquire a DRM minor
+ * @minor_id: Minor ID of the DRM-minor
+ *
+ * Looks up the given minor-ID and returns the respective DRM-minor object. The
+ * reference count of the underlying device is increased, so you must release this
+ * object with drm_minor_release().
*
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_version structure.
- * \return zero on success or negative number on failure.
+ * As long as you hold this minor, it is guaranteed that the object and the
+ * minor->dev pointer will stay valid! However, the device may get unplugged and
+ * unregistered while you hold the minor.
*
- * Fills in the version information in \p arg.
+ * Returns:
+ * Pointer to minor-object with increased device-refcount, or PTR_ERR on
+ * failure.
*/
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
- struct drm_version *version = data;
- int err;
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (minor)
+ drm_dev_ref(minor->dev);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ if (!minor) {
+ return ERR_PTR(-ENODEV);
+ } else if (drm_device_is_unplugged(minor->dev)) {
+ drm_dev_unref(minor->dev);
+ return ERR_PTR(-ENODEV);
+ }
- version->version_major = dev->driver->major;
- version->version_minor = dev->driver->minor;
- version->version_patchlevel = dev->driver->patchlevel;
- err = drm_copy_field(version->name, &version->name_len,
- dev->driver->name);
- if (!err)
- err = drm_copy_field(version->date, &version->date_len,
- dev->driver->date);
- if (!err)
- err = drm_copy_field(version->desc, &version->desc_len,
- dev->driver->desc);
+ return minor;
+}
- return err;
+/**
+ * drm_minor_release - Release DRM minor
+ * @minor: Pointer to DRM minor object
+ *
+ * Release a minor that was previously acquired via drm_minor_acquire().
+ */
+void drm_minor_release(struct drm_minor *minor)
+{
+ drm_dev_unref(minor->dev);
}
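
Editor's sketch (not part of this patch): drm_minor_acquire() and drm_minor_release() are internal DRM-core helpers, so a hypothetical caller inside drm.ko would pair them around the short access window, as below. The function name is illustrative only.

	#include <drm/drmP.h>

	/* Hypothetical in-core lookup: hold the minor only while accessing it. */
	static int example_lookup(unsigned int minor_id)
	{
		struct drm_minor *minor;

		minor = drm_minor_acquire(minor_id);	/* takes a device reference */
		if (IS_ERR(minor))
			return PTR_ERR(minor);

		DRM_DEBUG("minor %d belongs to device %p\n",
			  minor->index, minor->dev);

		drm_minor_release(minor);		/* drops the device reference */
		return 0;
	}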
/**
- * drm_ioctl_permit - Check ioctl permissions against caller
+ * drm_put_dev - Unregister and release a DRM device
+ * @dev: DRM device
+ *
+ * Called at module unload time or when a PCI device is unplugged.
*
- * @flags: ioctl permission flags.
- * @file_priv: Pointer to struct drm_file identifying the caller.
+ * Use of this function is discouraged. It will eventually go away completely.
+ * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
*
- * Checks whether the caller is allowed to run an ioctl with the
- * indicated permissions. If so, returns zero. Otherwise returns an
- * error code suitable for ioctl return.
+ * Cleans up the DRM device, calling drm_lastclose().
*/
-static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+void drm_put_dev(struct drm_device *dev)
{
- /* ROOT_ONLY is only for CAP_SYS_ADMIN */
- if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
- return -EACCES;
-
- /* AUTH is only for authenticated or render client */
- if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
- !file_priv->authenticated))
- return -EACCES;
-
- /* MASTER is only for master or control clients */
- if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
- !drm_is_control_client(file_priv)))
- return -EACCES;
-
- /* Control clients must be explicitly allowed */
- if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
- drm_is_control_client(file_priv)))
- return -EACCES;
-
- /* Render clients must be explicitly allowed */
- if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
- drm_is_render_client(file_priv)))
- return -EACCES;
+ DRM_DEBUG("\n");
- return 0;
+ if (!dev) {
+ DRM_ERROR("cleanup called no dev\n");
+ return;
+ }
+
+ drm_dev_unregister(dev);
+ drm_dev_unref(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
+
+void drm_unplug_dev(struct drm_device *dev)
+{
+ /* for a USB device */
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+
+ mutex_lock(&drm_global_mutex);
+
+ drm_device_set_unplugged(dev);
+
+ if (dev->open_count == 0) {
+ drm_put_dev(dev);
+ }
+ mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
+
+/*
+ * DRM internal mount
+ * We want to be able to allocate our own "struct address_space" to control
+ * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
+ * stand-alone address_space objects, so we need an underlying inode. As there
+ * is no way to allocate an independent inode easily, we need a fake internal
+ * VFS mount-point.
+ *
+ * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
+ * frees it again. You are allowed to use iget() and iput() to get references to
+ * the inode. But each drm_fs_inode_new() call must be paired with exactly one
+ * drm_fs_inode_free() call (which does not have to be the last iput()).
+ * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
+ * between multiple inode-users. You could, technically, call
+ * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
+ * iput(), but this way you'd end up with a new vfsmount for each inode.
+ */
+
+static int drm_fs_cnt;
+static struct vfsmount *drm_fs_mnt;
+
+static const struct dentry_operations drm_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+static const struct super_operations drm_fs_sops = {
+ .statfs = simple_statfs,
+};
+
+static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type,
+ "drm:",
+ &drm_fs_sops,
+ &drm_fs_dops,
+ 0x010203ff);
+}
+
+static struct file_system_type drm_fs_type = {
+ .name = "drm",
+ .owner = THIS_MODULE,
+ .mount = drm_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static struct inode *drm_fs_inode_new(void)
+{
+ struct inode *inode;
+ int r;
+
+ r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
+ if (r < 0) {
+ DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
+ return ERR_PTR(r);
+ }
+
+ inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
+ if (IS_ERR(inode))
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+
+ return inode;
+}
+
+static void drm_fs_inode_free(struct inode *inode)
+{
+ if (inode) {
+ iput(inode);
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+ }
}
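
Editor's sketch (not part of this patch): since drm_fs_inode_new()/drm_fs_inode_free() are static, a user would live in this file and simply mirror the pairing rule spelled out in the comment above; the helper name below is illustrative.

	/* Pairing rule: exactly one drm_fs_inode_free() per drm_fs_inode_new(). */
	static int example_inode_pairing(void)
	{
		struct inode *inode;

		inode = drm_fs_inode_new();	/* pins the internal mount */
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		/* ... use inode->i_mapping as a private address_space ... */

		drm_fs_inode_free(inode);	/* iput() + unpin, exactly once */
		return 0;
	}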
/**
- * Called whenever a process performs an ioctl on /dev/drm.
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems.
*
- * Looks up the ioctl function in the ::ioctls table, checking for root
- * previleges if so required, and dispatches to the respective function.
+ * The initial ref-count of the object is 1. Use drm_dev_ref() and
+ * drm_dev_unref() to take and drop further ref-counts.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
*/
-long drm_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent)
{
- struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
- const struct drm_ioctl_desc *ioctl = NULL;
- drm_ioctl_t *func;
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int retcode = -EINVAL;
- char stack_kdata[128];
- char *kdata = NULL;
- unsigned int usize, asize;
-
- dev = file_priv->minor->dev;
-
- if (drm_device_is_unplugged(dev))
- return -ENODEV;
-
- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
- goto err_i1;
- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- u32 drv_size;
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- drv_size = _IOC_SIZE(ioctl->cmd_drv);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
- cmd = ioctl->cmd_drv;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ kref_init(&dev->ref);
+ dev->dev = parent;
+ dev->driver = driver;
+
+ INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
+
+ spin_lock_init(&dev->buf_lock);
+ spin_lock_init(&dev->event_lock);
+ mutex_init(&dev->struct_mutex);
+ mutex_init(&dev->ctxlist_mutex);
+ mutex_init(&dev->master_mutex);
+
+ dev->anon_inode = drm_fs_inode_new();
+ if (IS_ERR(dev->anon_inode)) {
+ ret = PTR_ERR(dev->anon_inode);
+ DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
+ goto err_free;
}
- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
- u32 drv_size;
-
- ioctl = &drm_ioctls[nr];
- drv_size = _IOC_SIZE(ioctl->cmd);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_minors;
+ }
- cmd = ioctl->cmd;
- } else
- goto err_i1;
+ if (drm_core_check_feature(dev, DRIVER_RENDER)) {
+ ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_minors;
+ }
- DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, ioctl->name);
+ ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_minors;
- /* Do not trust userspace, use our own definition */
- func = ioctl->func;
+ if (drm_ht_create(&dev->map_hash, 12))
+ goto err_minors;
- if (unlikely(!func)) {
- DRM_DEBUG("no function\n");
- retcode = -EINVAL;
- goto err_i1;
+ ret = drm_legacy_ctxbitmap_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto err_ht;
}
- retcode = drm_ioctl_permit(ioctl->flags, file_priv);
- if (unlikely(retcode))
- goto err_i1;
-
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (asize <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(asize, GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
+ if (driver->driver_features & DRIVER_GEM) {
+ ret = drm_gem_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto err_ctxbitmap;
}
- if (asize > usize)
- memset(kdata + usize, 0, asize - usize);
}
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- usize) != 0) {
- retcode = -EFAULT;
- goto err_i1;
- }
- } else if (cmd & IOC_OUT) {
- memset(kdata, 0, usize);
- }
+ return dev;
+
+err_ctxbitmap:
+ drm_legacy_ctxbitmap_cleanup(dev);
+err_ht:
+ drm_ht_remove(&dev->map_hash);
+err_minors:
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+ drm_fs_inode_free(dev->anon_inode);
+err_free:
+ mutex_destroy(&dev->master_mutex);
+ kfree(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
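
Editor's sketch (not part of this patch): a minimal allocation in a hypothetical platform driver's probe path; the function and driver names are assumptions.

	#include <linux/platform_device.h>
	#include <drm/drmP.h>

	/* Hypothetical probe step: allocate the device, ref-count starts at 1. */
	static int example_alloc(struct platform_device *pdev,
				 struct drm_driver *driver)
	{
		struct drm_device *drm;

		drm = drm_dev_alloc(driver, &pdev->dev);
		if (!drm)
			return -ENOMEM;

		platform_set_drvdata(pdev, drm);
		return 0;
	}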
+
+static void drm_dev_release(struct kref *ref)
+{
+ struct drm_device *dev = container_of(ref, struct drm_device, ref);
+
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_destroy(dev);
+
+ drm_legacy_ctxbitmap_cleanup(dev);
+ drm_ht_remove(&dev->map_hash);
+ drm_fs_inode_free(dev->anon_inode);
+
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+
+ mutex_destroy(&dev->master_mutex);
+ kfree(dev->unique);
+ kfree(dev);
+}
- if (ioctl->flags & DRM_UNLOCKED)
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
+/**
+ * drm_dev_ref - Take reference of a DRM device
+ * @dev: device to take reference of or NULL
+ *
+ * This increases the ref-count of @dev by one. You *must* already own a
+ * reference when calling this. Use drm_dev_unref() to drop this reference
+ * again.
+ *
+ * This function never fails. However, this function does not provide *any*
+ * guarantee whether the device is alive or running. It only provides a
+ * reference to the object and the memory associated with it.
+ */
+void drm_dev_ref(struct drm_device *dev)
+{
+ if (dev)
+ kref_get(&dev->ref);
+}
+EXPORT_SYMBOL(drm_dev_ref);
+
+/**
+ * drm_dev_unref - Drop reference of a DRM device
+ * @dev: device to drop reference of or NULL
+ *
+ * This decreases the ref-count of @dev by one. The device is destroyed if the
+ * ref-count drops to zero.
+ */
+void drm_dev_unref(struct drm_device *dev)
+{
+ if (dev)
+ kref_put(&dev->ref, drm_dev_release);
+}
+EXPORT_SYMBOL(drm_dev_unref);
+
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ * @flags: Flags passed to the driver's .load() function
+ *
+ * Register the DRM device @dev with the system, advertise the device to user-space
+ * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+ * previously.
+ *
+ * Never call this twice on any device!
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
+{
+ int ret;
+
+ mutex_lock(&drm_global_mutex);
+
+ ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_minors;
+
+ ret = drm_minor_register(dev, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_minors;
+
+ ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_minors;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, flags);
+ if (ret)
+ goto err_minors;
}
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- usize) != 0)
- retcode = -EFAULT;
+ /* setup grouping for legacy outputs */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_unload;
}
- err_i1:
- if (!ioctl)
- DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, cmd, nr);
-
- if (kdata != stack_kdata)
- kfree(kdata);
- if (retcode)
- DRM_DEBUG("ret = %d\n", retcode);
- return retcode;
+ ret = 0;
+ goto out_unlock;
+
+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+err_minors:
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+out_unlock:
+ mutex_unlock(&drm_global_mutex);
+ return ret;
}
-EXPORT_SYMBOL(drm_ioctl);
+EXPORT_SYMBOL(drm_dev_register);
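
Editor's sketch (not part of this patch): registering the device allocated earlier, dropping the initial reference on failure; the helper name and flags value are assumptions.

	/* Hypothetical probe tail: advertise the device to user space. */
	static int example_register(struct drm_device *drm)
	{
		int ret;

		ret = drm_dev_register(drm, 0);		/* 0 is passed to ->load() */
		if (ret)
			drm_dev_unref(drm);		/* drop the initial reference */

		return ret;
	}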
/**
- * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
+ *
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_unref() to drop their final reference.
+ */
+void drm_dev_unregister(struct drm_device *dev)
+{
+ struct drm_map_list *r_list, *list_temp;
+
+ drm_lastclose(dev);
+
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+
+ if (dev->agp)
+ drm_pci_agp_destroy(dev);
+
+ drm_vblank_cleanup(dev);
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+ drm_rmmap(dev, r_list->map);
+
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+}
+EXPORT_SYMBOL(drm_dev_unregister);
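
Editor's sketch (not part of this patch): the remove path in its explicit form, i.e. exactly what drm_put_dev() above does; the function name is illustrative.

	/* Hypothetical remove path: unregister, then drop the last reference. */
	static void example_remove(struct drm_device *drm)
	{
		drm_dev_unregister(drm);	/* stop user-space access */
		drm_dev_unref(drm);		/* may free @drm if this was the last ref */
	}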
+
+/**
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @fmt: format string for unique name
+ *
+ * Sets the unique name of a DRM device using the specified format string and
+ * a variable list of arguments. Drivers can use this at driver probe time if
+ * the unique name of the devices they drive is static.
*
- * @nr: Ioctl number.
- * @flags: Where to return the ioctl permission flags
+ * Return: 0 on success or a negative error code on failure.
*/
-bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
+int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
- if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
- (nr < DRM_COMMAND_BASE)) {
- *flags = drm_ioctls[nr].flags;
- return true;
+ va_list ap;
+
+ kfree(dev->unique);
+
+ va_start(ap, fmt);
+ dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_dev_set_unique);
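
Editor's sketch (not part of this patch): a typical probe-time call; using the parent device name via dev_name() is an assumption, not something this patch mandates.

	#include <linux/device.h>
	#include <drm/drmP.h>

	/* Hypothetical unique-name setup from the parent device name. */
	static int example_set_unique(struct drm_device *drm)
	{
		return drm_dev_set_unique(drm, "%s", dev_name(drm->dev));
	}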
+
+/*
+ * DRM Core
+ * The DRM core module initializes all global DRM objects and makes them
+ * available to drivers. Once setup, drivers can probe their respective
+ * devices.
+ * Currently, core management includes:
+ * - The "DRM-Global" key/value database
+ * - Global ID management for connectors
+ * - DRM major number allocation
+ * - DRM minor management
+ * - DRM sysfs class
+ * - DRM debugfs root
+ *
+ * Furthermore, the DRM core provides dynamic char-dev lookups. For each
+ * interface registered on a DRM device, you can request minor numbers from DRM
+ * core. DRM core takes care of major-number management and char-dev
+ * registration. A stub ->open() callback forwards any open() requests to the
+ * registered minor.
+ */
+
+static int drm_stub_open(struct inode *inode, struct file *filp)
+{
+ const struct file_operations *new_fops;
+ struct drm_minor *minor;
+ int err;
+
+ DRM_DEBUG("\n");
+
+ mutex_lock(&drm_global_mutex);
+ minor = drm_minor_acquire(iminor(inode));
+ if (IS_ERR(minor)) {
+ err = PTR_ERR(minor);
+ goto out_unlock;
+ }
+
+ new_fops = fops_get(minor->dev->driver->fops);
+ if (!new_fops) {
+ err = -ENODEV;
+ goto out_release;
}
- return false;
+ replace_fops(filp, new_fops);
+ if (filp->f_op->open)
+ err = filp->f_op->open(inode, filp);
+ else
+ err = 0;
+
+out_release:
+ drm_minor_release(minor);
+out_unlock:
+ mutex_unlock(&drm_global_mutex);
+ return err;
}
-EXPORT_SYMBOL(drm_ioctl_flags);
+
+static const struct file_operations drm_stub_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_stub_open,
+ .llseek = noop_llseek,
+};
+
+static int __init drm_core_init(void)
+{
+ int ret = -ENOMEM;
+
+ drm_global_init();
+ drm_connector_ida_init();
+ idr_init(&drm_minors_idr);
+
+ if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+ goto err_p1;
+
+ drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+ if (IS_ERR(drm_class)) {
+ printk(KERN_ERR "DRM: Error creating drm class.\n");
+ ret = PTR_ERR(drm_class);
+ goto err_p2;
+ }
+
+ drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ if (!drm_debugfs_root) {
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
+ ret = -1;
+ goto err_p3;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+ return 0;
+err_p3:
+ drm_sysfs_destroy();
+err_p2:
+ unregister_chrdev(DRM_MAJOR, "drm");
+
+ idr_destroy(&drm_minors_idr);
+err_p1:
+ return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+ debugfs_remove(drm_debugfs_root);
+ drm_sysfs_destroy();
+
+ unregister_chrdev(DRM_MAJOR, "drm");
+
+ drm_connector_ida_destroy();
+ idr_destroy(&drm_minors_idr);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index dfa9769b26b5..1dbf3bc4c6a3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3305,6 +3305,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder && connector->eld[0])
@@ -3775,8 +3776,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
- /* Populate picture aspect ratio from CEA mode list */
- if (frame->video_code > 0)
+ /*
+ * Populate picture aspect ratio from either
+ * user input (if specified) or from the CEA mode list.
+ */
+ if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
+ mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
+ frame->picture_aspect = mode->picture_aspect_ratio;
+ else if (frame->video_code > 0)
frame->picture_aspect = drm_get_cea_aspect_ratio(
frame->video_code);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index f27c883be391..cc0ae047ed3b 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -327,7 +327,7 @@ err_drm_gem_cma_free_object:
return ret;
}
-static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
.fb_probe = drm_fbdev_cma_create,
};
@@ -354,9 +354,10 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
helper = &fbdev_cma->fb_helper;
+ drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+
ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
if (ret < 0) {
dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d5d8cea1a679..3144db9dc0f1 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -49,10 +49,11 @@ static LIST_HEAD(kernel_fb_helper_list);
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
- * Initialization is done as a three-step process with drm_fb_helper_init(),
- * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
- * Drivers with fancier requirements than the default behaviour can override the
- * second step with their own code. Teardown is done with drm_fb_helper_fini().
+ * Initialization is done as a four-step process with drm_fb_helper_prepare(),
+ * drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and
+ * drm_fb_helper_initial_config(). Drivers with fancier requirements than the
+ * default behaviour can override the third step with their own code.
+ * Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
* drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
@@ -63,6 +64,19 @@ static LIST_HEAD(kernel_fb_helper_list);
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
+ *
+ * It is possible, though perhaps somewhat tricky, to implement race-free
+ * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
+ * helper must be called first to initialize the minimum required to make
+ * hotplug detection work. Drivers also need to make sure to properly set up
+ * the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init()
+ * it is safe to enable interrupts and start processing hotplug events. At the
+ * same time, drivers should initialize all modeset objects such as CRTCs,
+ * encoders and connectors. To finish up the fbdev helper initialization, the
+ * drm_fb_helper_init() function is called. To probe for all attached displays
+ * and set up an initial configuration using the detected hardware, drivers
+ * should call drm_fb_helper_single_add_all_connectors() followed by
+ * drm_fb_helper_initial_config().
*/
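
Editor's sketch (not part of this patch): the four-step initialization described above, in the order this series establishes. The function name, the bpp value of 32 and the use of mode_config limits are assumptions; the call signatures match this kernel generation.

	#include <drm/drmP.h>
	#include <drm/drm_fb_helper.h>

	/* Hypothetical fbdev setup following the four-step sequence above. */
	static int example_fbdev_setup(struct drm_device *dev,
				       struct drm_fb_helper *helper,
				       const struct drm_fb_helper_funcs *funcs)
	{
		int ret;

		drm_fb_helper_prepare(dev, helper, funcs);		/* step 1 */

		ret = drm_fb_helper_init(dev, helper,			/* step 2 */
					 dev->mode_config.num_crtc,
					 dev->mode_config.num_connector);
		if (ret)
			return ret;

		ret = drm_fb_helper_single_add_all_connectors(helper);	/* step 3 */
		if (ret) {
			drm_fb_helper_fini(helper);
			return ret;
		}

		drm_fb_helper_initial_config(helper, 32);		/* step 4 */
		return 0;
	}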
/**
@@ -105,6 +119,58 @@ fail:
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector)
+{
+ struct drm_fb_helper_connector **temp;
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+ if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
+ temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1;
+ fb_helper->connector_info = temp;
+ }
+
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ return -ENOMEM;
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+
+int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ struct drm_fb_helper_connector *fb_helper_connector;
+ int i, j;
+
+ WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (fb_helper->connector_info[i]->connector == connector)
+ break;
+ }
+
+ if (i == fb_helper->connector_count)
+ return -EINVAL;
+ fb_helper_connector = fb_helper->connector_info[i];
+
+ for (j = i + 1; j < fb_helper->connector_count; j++) {
+ fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
+ }
+ fb_helper->connector_count--;
+ kfree(fb_helper_connector);
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
+
static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
struct drm_fb_helper_connector *fb_helper_conn;
@@ -199,9 +265,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
struct drm_crtc_helper_funcs *funcs;
int i;
- if (list_empty(&kernel_fb_helper_list))
- return false;
-
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
for (i = 0; i < helper->crtc_count; i++) {
struct drm_mode_set *mode_set =
@@ -531,6 +594,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
}
/**
+ * drm_fb_helper_prepare - setup a drm_fb_helper structure
+ * @dev: DRM device
+ * @helper: driver-allocated fbdev helper structure to set up
+ * @funcs: pointer to structure of functions associated with this helper
+ *
+ * Sets up the bare minimum to make the framebuffer helper usable. This is
+ * useful to implement race-free initialization of the polling helpers.
+ */
+void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+ const struct drm_fb_helper_funcs *funcs)
+{
+ INIT_LIST_HEAD(&helper->kernel_fb_list);
+ helper->funcs = funcs;
+ helper->dev = dev;
+}
+EXPORT_SYMBOL(drm_fb_helper_prepare);
+
+/**
* drm_fb_helper_init - initialize a drm_fb_helper structure
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
@@ -542,8 +623,7 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
* nor register the fbdev. This is only done in drm_fb_helper_initial_config()
 * to allow drivers more control over the exact init sequence.
*
- * Drivers must set fb_helper->funcs before calling
- * drm_fb_helper_initial_config().
+ * Drivers must call drm_fb_helper_prepare() before calling this function.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
@@ -558,10 +638,6 @@ int drm_fb_helper_init(struct drm_device *dev,
if (!max_conn_count)
return -EINVAL;
- fb_helper->dev = dev;
-
- INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
-
fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
if (!fb_helper->crtc_info)
return -ENOMEM;
@@ -572,6 +648,7 @@ int drm_fb_helper_init(struct drm_device *dev,
kfree(fb_helper->crtc_info);
return -ENOMEM;
}
+ fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
@@ -1056,7 +1133,6 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
- info->fix.type_aux = 0;
info->fix.line_length = pitch;
return;
@@ -1613,8 +1689,10 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
* either the output polling work or a work item launched from the driver's
* hotplug interrupt).
*
- * Note that the driver must ensure that this is only called _after_ the fb has
- * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
+ * Note that drivers may call this even before calling
+ * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows
+ * for a race-free fbcon setup and will make sure that the fbdev emulation will
+ * not miss any hotplug events.
*
* RETURNS:
* 0 on success and a non-zero error code otherwise.
@@ -1624,11 +1702,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
u32 max_width, max_height;
- if (!fb_helper->fb)
- return 0;
-
mutex_lock(&fb_helper->dev->mode_config.mutex);
- if (!drm_fb_helper_is_bound(fb_helper)) {
+ if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
mutex_unlock(&fb_helper->dev->mode_config.mutex);
return 0;
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 021fe5d11df5..79d5221c6e41 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -38,6 +38,7 @@
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include "drm_legacy.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -112,55 +113,12 @@ err_undo:
EXPORT_SYMBOL(drm_open);
/**
- * File \c open operation.
- *
- * \param inode device inode.
- * \param filp file pointer.
- *
- * Puts the dev->fops corresponding to the device minor number into
- * \p filp, call the \c open method, and restore the file operations.
- */
-int drm_stub_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev;
- struct drm_minor *minor;
- int err = -ENODEV;
- const struct file_operations *new_fops;
-
- DRM_DEBUG("\n");
-
- mutex_lock(&drm_global_mutex);
- minor = drm_minor_acquire(iminor(inode));
- if (IS_ERR(minor))
- goto out_unlock;
-
- dev = minor->dev;
- new_fops = fops_get(dev->driver->fops);
- if (!new_fops)
- goto out_release;
-
- replace_fops(filp, new_fops);
- if (filp->f_op->open)
- err = filp->f_op->open(inode, filp);
-
-out_release:
- drm_minor_release(minor);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
- return err;
-}
-
-/**
* Check whether DRI will run on this CPU.
*
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
*/
static int drm_cpu_valid(void)
{
-#if defined(__i386__)
- if (boot_cpu_data.x86 == 3)
- return 0; /* No cmpxchg on a 386 */
-#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
return 0; /* No cmpxchg before v9 sparc. */
#endif
@@ -203,8 +161,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
priv->minor = minor;
/* for compatibility root is always authenticated */
- priv->always_authenticated = capable(CAP_SYS_ADMIN);
- priv->authenticated = priv->always_authenticated;
+ priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
@@ -429,6 +386,10 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("open_count = %d\n", dev->open_count);
+ mutex_lock(&dev->struct_mutex);
+ list_del(&file_priv->lhead);
+ mutex_unlock(&dev->struct_mutex);
+
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
@@ -461,44 +422,18 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
+ drm_legacy_ctxbitmap_flush(dev, file_priv);
mutex_lock(&dev->master_mutex);
if (file_priv->is_master) {
struct drm_master *master = file_priv->master;
- struct drm_file *temp;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(temp, &dev->filelist, lhead) {
- if ((temp->master == file_priv->master) &&
- (temp != file_priv))
- temp->authenticated = temp->always_authenticated;
- }
/**
* Since the master is disappearing, so is the
* possibility to lock.
*/
-
+ mutex_lock(&dev->struct_mutex);
if (master->lock.hw_lock) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
@@ -522,10 +457,6 @@ int drm_release(struct inode *inode, struct file *filp)
file_priv->is_master = 0;
mutex_unlock(&dev->master_mutex);
- mutex_lock(&dev->struct_mutex);
- list_del(&file_priv->lhead);
- mutex_unlock(&dev->struct_mutex);
-
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f7d71190aad5..6adee4c2afc0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
* @obj: obj in question
- * @gfpmask: gfp mask of requested pages
+ *
+ * This reads the page-array of the shmem-backing storage of the given gem
+ * object. An array of pages is returned. If a page is not allocated or
+ * swapped-out, this will allocate/swap-in the required pages. Note that the
+ * whole object is covered by the page-array and pinned in memory.
+ *
+ * Use drm_gem_put_pages() to release the array and unpin all pages.
+ *
+ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
+ * If you require other GFP-masks, you have to do those allocations yourself.
+ *
+ * Note that you are not allowed to change gfp-zones during runtime. That is,
+ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
+ * set during initialization. If you have special zone constraints, set them
+ * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
+ * to keep pages in the required zone during swap-in.
*/
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
- struct inode *inode;
struct address_space *mapping;
struct page *p, **pages;
int i, npages;
/* This is the shared memory object that backs the GEM resource */
- inode = file_inode(obj->filp);
- mapping = inode->i_mapping;
+ mapping = file_inode(obj->filp)->i_mapping;
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
if (pages == NULL)
return ERR_PTR(-ENOMEM);
- gfpmask |= mapping_gfp_mask(mapping);
-
for (i = 0; i < npages; i++) {
- p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ p = shmem_read_mapping_page(mapping, i);
if (IS_ERR(p))
goto fail;
pages[i] = p;
@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
* __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
* so shmem can relocate pages during swapin if required.
*/
- BUG_ON((gfpmask & __GFP_DMA32) &&
+ BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));
}
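
Editor's sketch (not part of this patch): with the gfpmask parameter gone, a caller that needs, say, DMA32-capable pages constrains the shmem mapping once at init time and then pins/unpins via the helpers. The function name and the GFP flags chosen are assumptions.

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <drm/drmP.h>

	/* Hypothetical caller: set the zone constraint once, then pin the pages. */
	static int example_pin_pages(struct drm_gem_object *obj)
	{
		struct page **pages;

		mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
				     GFP_USER | __GFP_DMA32);

		pages = drm_gem_get_pages(obj);		/* pins the whole object */
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		/* ... access pages[0 .. obj->size / PAGE_SIZE - 1] ... */

		drm_gem_put_pages(obj, pages, false, false);	/* unpin, not dirtied */
		return 0;
	}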
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 05c97c5350a1..e467e67af6e7 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -327,7 +327,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
/* Create a CMA GEM buffer. */
cma_obj = __drm_gem_cma_create(dev, size);
if (IS_ERR(cma_obj))
- return ERR_PTR(PTR_ERR(cma_obj));
+ return ERR_CAST(cma_obj);
cma_obj->paddr = sg_dma_address(sgt->sgl);
cma_obj->sgt = sgt;
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index 7e4bae760e27..c3b80fd65d62 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -125,7 +125,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
parent = &entry->head;
}
if (parent) {
- hlist_add_after_rcu(parent, &item->head);
+ hlist_add_behind_rcu(&item->head, parent);
} else {
hlist_add_head_rcu(&item->head, h_list);
}
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 86feedd5e6f6..ecaf0fa2eec8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -132,7 +132,7 @@ int drm_bufs_info(struct seq_file *m, void *data)
i,
dma->bufs[i].buf_size,
dma->bufs[i].buf_count,
- atomic_read(&dma->bufs[i].freelist.count),
+ 0,
dma->bufs[i].seg_count,
seg_pages,
seg_pages * PAGE_SIZE / 1024);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 69c61f392e66..40be746b7e68 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -1,11 +1,3 @@
-/**
- * \file drm_ioctl.c
- * IOCTL processing for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
* Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
*
@@ -13,6 +5,9 @@
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author Gareth Hughes <gareth@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -35,6 +30,7 @@
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include "drm_legacy.h"
#include <linux/pci.h>
#include <linux/export.h>
@@ -42,6 +38,124 @@
#include <asm/mtrr.h>
#endif
+static int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct drm_ioctl_desc drm_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+#if __OS_HAS_AGP
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+};
+
+#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+
/**
* Get the bus id.
*
@@ -342,8 +456,6 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->stereo_allowed = req->value;
break;
case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
- if (!drm_universal_planes)
- return -EINVAL;
if (req->value > 1)
return -EINVAL;
file_priv->universal_planes = req->value;
@@ -417,3 +529,243 @@ int drm_noop(struct drm_device *dev, void *data,
return 0;
}
EXPORT_SYMBOL(drm_noop);
+
+/**
+ * Copy an IOCTL return string to user space
+ */
+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
+{
+ int len;
+
+ /* don't overflow userbuf */
+ len = strlen(value);
+ if (len > *buf_len)
+ len = *buf_len;
+
+ /* let userspace know exact length of driver value (which could be
+ * larger than the userspace-supplied buffer) */
+ *buf_len = strlen(value);
+
+ /* finally, try filling in the userbuf */
+ if (len && buf)
+ if (copy_to_user(buf, value, len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Get version information
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_version structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p arg.
+ */
+static int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_version *version = data;
+ int err;
+
+ version->version_major = dev->driver->major;
+ version->version_minor = dev->driver->minor;
+ version->version_patchlevel = dev->driver->patchlevel;
+ err = drm_copy_field(version->name, &version->name_len,
+ dev->driver->name);
+ if (!err)
+ err = drm_copy_field(version->date, &version->date_len,
+ dev->driver->date);
+ if (!err)
+ err = drm_copy_field(version->desc, &version->desc_len,
+ dev->driver->desc);
+
+ return err;
+}
+
+/**
+ * drm_ioctl_permit - Check ioctl permissions against caller
+ *
+ * @flags: ioctl permission flags.
+ * @file_priv: Pointer to struct drm_file identifying the caller.
+ *
+ * Checks whether the caller is allowed to run an ioctl with the
+ * indicated permissions. If so, returns zero. Otherwise returns an
+ * error code suitable for ioctl return.
+ */
+static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+{
+ /* ROOT_ONLY is only for CAP_SYS_ADMIN */
+ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
+ return -EACCES;
+
+ /* AUTH is only for authenticated or render client */
+ if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
+ !file_priv->authenticated))
+ return -EACCES;
+
+ /* MASTER is only for master or control clients */
+ if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
+ !drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Control clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
+ drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Render clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
+ drm_is_render_client(file_priv)))
+ return -EACCES;
+
+ return 0;
+}
+
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ */
+long drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ const struct drm_ioctl_desc *ioctl = NULL;
+ drm_ioctl_t *func;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ int retcode = -EINVAL;
+ char stack_kdata[128];
+ char *kdata = NULL;
+ unsigned int usize, asize;
+
+ dev = file_priv->minor->dev;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+ ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+ goto err_i1;
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ u32 drv_size;
+ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ drv_size = _IOC_SIZE(ioctl->cmd_drv);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+ cmd = ioctl->cmd_drv;
+ }
+ else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+ u32 drv_size;
+
+ ioctl = &drm_ioctls[nr];
+
+ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+
+ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, ioctl->name);
+
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ goto err_i1;
+ }
+
+ retcode = drm_ioctl_permit(ioctl->flags, file_priv);
+ if (unlikely(retcode))
+ goto err_i1;
+
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
+ }
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
+ }
+
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg,
+ usize) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
+ else {
+ mutex_lock(&drm_global_mutex);
+ retcode = func(dev, kdata, file_priv);
+ mutex_unlock(&drm_global_mutex);
+ }
+
+ if (cmd & IOC_OUT) {
+ if (copy_to_user((void __user *)arg, kdata,
+ usize) != 0)
+ retcode = -EFAULT;
+ }
+
+ err_i1:
+ if (!ioctl)
+ DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+ if (retcode)
+ DRM_DEBUG("ret = %d\n", retcode);
+ return retcode;
+}
+EXPORT_SYMBOL(drm_ioctl);
+
+/**
+ * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ *
+ * @nr: Ioctl number.
+ * @flags: Where to return the ioctl permission flags
+ */
+bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
+{
+ if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
+ (nr < DRM_COMMAND_BASE)) {
+ *flags = drm_ioctls[nr].flags;
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_ioctl_flags);
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
new file mode 100644
index 000000000000..d34f20a79b7c
--- /dev/null
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -0,0 +1,51 @@
+#ifndef __DRM_LEGACY_H__
+#define __DRM_LEGACY_H__
+
+/*
+ * Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+struct drm_device;
+struct drm_file;
+
+/*
+ * Generic DRM Contexts
+ */
+
+#define DRM_KERNEL_CONTEXT 0
+#define DRM_RESERVED_CONTEXTS 1
+
+int drm_legacy_ctxbitmap_init(struct drm_device *dev);
+void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
+void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
+
+int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
+
+int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
+
+#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f6452682141b..e26b59e385ff 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -35,6 +35,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
+#include "drm_legacy.h"
static int drm_notifier(void *priv);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index e633df2f68d8..6aa6a9e95570 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -201,16 +201,15 @@ EXPORT_SYMBOL(mipi_dsi_detach);
/**
* mipi_dsi_dcs_write - send DCS write command
* @dsi: DSI device
- * @channel: virtual channel
* @data: pointer to the command followed by parameters
* @len: length of @data
*/
-int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
- const void *data, size_t len)
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
+ size_t len)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
- .channel = channel,
+ .channel = dsi->channel,
.tx_buf = data,
.tx_len = len
};
@@ -239,19 +238,18 @@ EXPORT_SYMBOL(mipi_dsi_dcs_write);
/**
* mipi_dsi_dcs_read - send DCS read request command
* @dsi: DSI device
- * @channel: virtual channel
* @cmd: DCS read command
* @data: pointer to read buffer
* @len: length of @data
*
* Function returns number of read bytes or error code.
*/
-ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
- u8 cmd, void *data, size_t len)
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
+ size_t len)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
- .channel = channel,
+ .channel = dsi->channel,
.type = MIPI_DSI_DCS_READ,
.tx_buf = &cmd,
.tx_len = 1,
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
new file mode 100644
index 000000000000..16150a00c237
--- /dev/null
+++ b/drivers/gpu/drm/drm_of.c
@@ -0,0 +1,67 @@
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/of_graph.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_of.h>
+
+/**
+ * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
+ * @dev: DRM device
+ * @port: port OF node
+ *
+ * Given a port OF node, return the possible mask of the corresponding
+ * CRTC within a device's list of CRTCs. Returns zero if not found.
+ */
+static uint32_t drm_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port)
+{
+ unsigned int index = 0;
+ struct drm_crtc *tmp;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp->port == port)
+ return 1 << index;
+
+ index++;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
+ * @dev: DRM device
+ * @port: encoder port to scan for endpoints
+ *
+ * Scan all endpoints attached to a port, locate their attached CRTCs,
+ * and generate the DRM mask of CRTCs which may be attached to this
+ * encoder.
+ *
+ * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ */
+uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+ struct device_node *port)
+{
+ struct device_node *remote_port, *ep = NULL;
+ uint32_t possible_crtcs = 0;
+
+ do {
+ ep = of_graph_get_next_endpoint(port, ep);
+ if (!ep)
+ break;
+
+ remote_port = of_graph_get_remote_port(ep);
+ if (!remote_port) {
+ of_node_put(ep);
+ return 0;
+ }
+
+ possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
+
+ of_node_put(remote_port);
+ } while (1);
+
+ return possible_crtcs;
+}
+EXPORT_SYMBOL(drm_of_find_possible_crtcs);
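
Editor's sketch (not part of this patch): how an encoder driver might consume the helper at bind time. The function name and the fall-back to all CRTCs are assumptions, not behaviour defined by this patch.

	#include <linux/of.h>
	#include <drm/drmP.h>
	#include <drm/drm_crtc.h>
	#include <drm/drm_of.h>

	/* Hypothetical encoder bind step: derive possible_crtcs from the OF graph. */
	static void example_bind_encoder(struct drm_device *drm,
					 struct drm_encoder *encoder,
					 struct device_node *port)
	{
		encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, port);

		/* Assumed fallback: allow every CRTC if the graph yields no match. */
		if (!encoder->possible_crtcs)
			encoder->possible_crtcs =
				(1 << drm->mode_config.num_crtc) - 1;
	}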
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 6d133149cc74..827ec1a3040b 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -335,9 +335,10 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
}
/* possible_crtc's will be filled in later by crtc_init */
- ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs,
- formats, num_formats,
- DRM_PLANE_TYPE_PRIMARY);
+ ret = drm_universal_plane_init(dev, primary, 0,
+ &drm_primary_helper_funcs,
+ formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY);
if (ret) {
kfree(primary);
primary = NULL;
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d22676b89cbb..db7d250f7ac7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -130,7 +130,14 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
- count = (*connector_funcs->get_modes)(connector);
+ {
+ if (connector->override_edid) {
+ struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
+
+ count = drm_add_edid_modes(connector, edid);
+ } else
+ count = (*connector_funcs->get_modes)(connector);
+ }
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 7047ca025787..631f5afd451c 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -293,3 +293,143 @@ void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
}
EXPORT_SYMBOL(drm_rect_debug_print);
+
+/**
+ * drm_rect_rotate - Rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation to be applied
+ *
+ * Apply @rotation to the coordinates of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the untransformed coordinate
+ * space.
+ */
+void drm_rect_rotate(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation)
+{
+ struct drm_rect tmp;
+
+ if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+ tmp = *r;
+
+ if (rotation & BIT(DRM_REFLECT_X)) {
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ }
+
+ if (rotation & BIT(DRM_REFLECT_Y)) {
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ }
+ }
+
+ switch (rotation & 0xf) {
+ case BIT(DRM_ROTATE_0):
+ break;
+ case BIT(DRM_ROTATE_90):
+ tmp = *r;
+ r->x1 = tmp.y1;
+ r->x2 = tmp.y2;
+ r->y1 = width - tmp.x2;
+ r->y2 = width - tmp.x1;
+ break;
+ case BIT(DRM_ROTATE_180):
+ tmp = *r;
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ break;
+ case BIT(DRM_ROTATE_270):
+ tmp = *r;
+ r->x1 = height - tmp.y2;
+ r->x2 = height - tmp.y1;
+ r->y1 = tmp.x1;
+ r->y2 = tmp.x2;
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_rect_rotate);
+
+/**
+ * drm_rect_rotate_inv - Inverse rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation whose inverse is to be applied
+ *
+ * Apply the inverse of @rotation to the coordinates
+ * of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the original untransformed
+ * coordinate space, so that you never have to flip
+ * them when doing a rotation and its inverse.
+ * That is, if you do:
+ *
+ * drm_rect_rotate(&r, width, height, rotation);
+ * drm_rect_rotate_inv(&r, width, height, rotation);
+ *
+ * you will always get back the original rectangle.
+ */
+void drm_rect_rotate_inv(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation)
+{
+ struct drm_rect tmp;
+
+ switch (rotation & 0xf) {
+ case BIT(DRM_ROTATE_0):
+ break;
+ case BIT(DRM_ROTATE_90):
+ tmp = *r;
+ r->x1 = width - tmp.y2;
+ r->x2 = width - tmp.y1;
+ r->y1 = tmp.x1;
+ r->y2 = tmp.x2;
+ break;
+ case BIT(DRM_ROTATE_180):
+ tmp = *r;
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ break;
+ case BIT(DRM_ROTATE_270):
+ tmp = *r;
+ r->x1 = tmp.y1;
+ r->x2 = tmp.y2;
+ r->y1 = height - tmp.x2;
+ r->y2 = height - tmp.x1;
+ break;
+ default:
+ break;
+ }
+
+ if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+ tmp = *r;
+
+ if (rotation & BIT(DRM_REFLECT_X)) {
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ }
+
+ if (rotation & BIT(DRM_REFLECT_Y)) {
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_rect_rotate_inv);
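A short sketch of the round trip described in the kernel-doc above, assuming a hypothetical caller with made-up coordinates; DRM_ROTATE_90 and BIT() are assumed to be visible through the included headers:

#include <linux/bitops.h>
#include <drm/drm_crtc.h>
#include <drm/drm_rect.h>

static void example_round_trip(void)
{
	struct drm_rect r = { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 220 };
	const int width = 1920, height = 1080;

	/* Map the rectangle into a 90-degree rotated coordinate space... */
	drm_rect_rotate(&r, width, height, BIT(DRM_ROTATE_90));

	/* ...and back again; r ends up with its original coordinates. */
	drm_rect_rotate_inv(&r, width, height, BIT(DRM_ROTATE_90));
}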
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
deleted file mode 100644
index 14d16464000a..000000000000
--- a/drivers/gpu/drm/drm_stub.c
+++ /dev/null
@@ -1,805 +0,0 @@
-/*
- * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
- *
- * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author Rickard E. (Rik) Faith <faith@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mount.h>
-#include <linux/slab.h>
-#include <drm/drmP.h>
-#include <drm/drm_core.h>
-
-unsigned int drm_debug = 0; /* 1 to enable debug output */
-EXPORT_SYMBOL(drm_debug);
-
-unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
-EXPORT_SYMBOL(drm_rnodes);
-
-/* 1 to allow user space to request universal planes (experimental) */
-unsigned int drm_universal_planes = 0;
-EXPORT_SYMBOL(drm_universal_planes);
-
-unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
-EXPORT_SYMBOL(drm_vblank_offdelay);
-
-unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
-EXPORT_SYMBOL(drm_timestamp_precision);
-
-/*
- * Default to use monotonic timestamps for wait-for-vblank and page-flip
- * complete events.
- */
-unsigned int drm_timestamp_monotonic = 1;
-
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
-MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output");
-MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
-MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
-MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
-MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
-
-module_param_named(debug, drm_debug, int, 0600);
-module_param_named(rnodes, drm_rnodes, int, 0600);
-module_param_named(universal_planes, drm_universal_planes, int, 0600);
-module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
-module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
-module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
-
-static DEFINE_SPINLOCK(drm_minor_lock);
-struct idr drm_minors_idr;
-
-struct class *drm_class;
-struct dentry *drm_debugfs_root;
-
-int drm_err(const char *func, const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
- int r;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
-
- va_end(args);
-
- return r;
-}
-EXPORT_SYMBOL(drm_err);
-
-void drm_ut_debug_printk(const char *function_name, const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, format);
- vaf.fmt = format;
- vaf.va = &args;
-
- printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(drm_ut_debug_printk);
-
-struct drm_master *drm_master_create(struct drm_minor *minor)
-{
- struct drm_master *master;
-
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
- return NULL;
-
- kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
- if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
- kfree(master);
- return NULL;
- }
- INIT_LIST_HEAD(&master->magicfree);
- master->minor = minor;
-
- return master;
-}
-
-struct drm_master *drm_master_get(struct drm_master *master)
-{
- kref_get(&master->refcount);
- return master;
-}
-EXPORT_SYMBOL(drm_master_get);
-
-static void drm_master_destroy(struct kref *kref)
-{
- struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_magic_entry *pt, *next;
- struct drm_device *dev = master->minor->dev;
- struct drm_map_list *r_list, *list_temp;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
-
- if (master->unique) {
- kfree(master->unique);
- master->unique = NULL;
- master->unique_len = 0;
- }
-
- list_for_each_entry_safe(pt, next, &master->magicfree, head) {
- list_del(&pt->head);
- drm_ht_remove_item(&master->magiclist, &pt->hash_item);
- kfree(pt);
- }
-
- drm_ht_remove(&master->magiclist);
-
- mutex_unlock(&dev->struct_mutex);
- kfree(master);
-}
-
-void drm_master_put(struct drm_master **master)
-{
- kref_put(&(*master)->refcount, drm_master_destroy);
- *master = NULL;
-}
-EXPORT_SYMBOL(drm_master_put);
-
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = 0;
-
- mutex_lock(&dev->master_mutex);
- if (file_priv->is_master)
- goto out_unlock;
-
- if (file_priv->minor->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
- }
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = -EINVAL;
-
- mutex_lock(&dev->master_mutex);
- if (!file_priv->is_master)
- goto out_unlock;
-
- if (!file_priv->minor->master)
- goto out_unlock;
-
- ret = 0;
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, false);
- drm_master_put(&file_priv->minor->master);
- file_priv->is_master = 0;
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
-/*
- * DRM Minors
- * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
- * of them is represented by a drm_minor object. Depending on the capabilities
- * of the device-driver, different interfaces are registered.
- *
- * Minors can be accessed via dev->$minor_name. This pointer is either
- * NULL or a valid drm_minor pointer and stays valid as long as the device is
- * valid. This means, DRM minors have the same life-time as the underlying
- * device. However, this doesn't mean that the minor is active. Minors are
- * registered and unregistered dynamically according to device-state.
- */
-
-static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
- unsigned int type)
-{
- switch (type) {
- case DRM_MINOR_LEGACY:
- return &dev->primary;
- case DRM_MINOR_RENDER:
- return &dev->render;
- case DRM_MINOR_CONTROL:
- return &dev->control;
- default:
- return NULL;
- }
-}
-
-static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor *minor;
-
- minor = kzalloc(sizeof(*minor), GFP_KERNEL);
- if (!minor)
- return -ENOMEM;
-
- minor->type = type;
- minor->dev = dev;
-
- *drm_minor_get_slot(dev, type) = minor;
- return 0;
-}
-
-static void drm_minor_free(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor **slot;
-
- slot = drm_minor_get_slot(dev, type);
- if (*slot) {
- drm_mode_group_destroy(&(*slot)->mode_group);
- kfree(*slot);
- *slot = NULL;
- }
-}
-
-static int drm_minor_register(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor *new_minor;
- unsigned long flags;
- int ret;
- int minor_id;
-
- DRM_DEBUG("\n");
-
- new_minor = *drm_minor_get_slot(dev, type);
- if (!new_minor)
- return 0;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&drm_minor_lock, flags);
- minor_id = idr_alloc(&drm_minors_idr,
- NULL,
- 64 * type,
- 64 * (type + 1),
- GFP_NOWAIT);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- idr_preload_end();
-
- if (minor_id < 0)
- return minor_id;
-
- new_minor->index = minor_id;
-
- ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_id;
- }
-
- ret = drm_sysfs_device_add(new_minor);
- if (ret) {
- DRM_ERROR("DRM: Error sysfs_device_add.\n");
- goto err_debugfs;
- }
-
- /* replace NULL with @minor so lookups will succeed from now on */
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_replace(&drm_minors_idr, new_minor, new_minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-
- DRM_DEBUG("new minor assigned %d\n", minor_id);
- return 0;
-
-err_debugfs:
- drm_debugfs_cleanup(new_minor);
-err_id:
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor_id);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- new_minor->index = 0;
- return ret;
-}
-
-static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor *minor;
- unsigned long flags;
-
- minor = *drm_minor_get_slot(dev, type);
- if (!minor || !minor->kdev)
- return;
-
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- minor->index = 0;
-
- drm_debugfs_cleanup(minor);
- drm_sysfs_device_remove(minor);
-}
-
-/**
- * drm_minor_acquire - Acquire a DRM minor
- * @minor_id: Minor ID of the DRM-minor
- *
- * Looks up the given minor-ID and returns the respective DRM-minor object. The
- * refence-count of the underlying device is increased so you must release this
- * object with drm_minor_release().
- *
- * As long as you hold this minor, it is guaranteed that the object and the
- * minor->dev pointer will stay valid! However, the device may get unplugged and
- * unregistered while you hold the minor.
- *
- * Returns:
- * Pointer to minor-object with increased device-refcount, or PTR_ERR on
- * failure.
- */
-struct drm_minor *drm_minor_acquire(unsigned int minor_id)
-{
- struct drm_minor *minor;
- unsigned long flags;
-
- spin_lock_irqsave(&drm_minor_lock, flags);
- minor = idr_find(&drm_minors_idr, minor_id);
- if (minor)
- drm_dev_ref(minor->dev);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-
- if (!minor) {
- return ERR_PTR(-ENODEV);
- } else if (drm_device_is_unplugged(minor->dev)) {
- drm_dev_unref(minor->dev);
- return ERR_PTR(-ENODEV);
- }
-
- return minor;
-}
-
-/**
- * drm_minor_release - Release DRM minor
- * @minor: Pointer to DRM minor object
- *
- * Release a minor that was previously acquired via drm_minor_acquire().
- */
-void drm_minor_release(struct drm_minor *minor)
-{
- drm_dev_unref(minor->dev);
-}
-
-/**
- * drm_put_dev - Unregister and release a DRM device
- * @dev: DRM device
- *
- * Called at module unload time or when a PCI device is unplugged.
- *
- * Use of this function is discouraged. It will eventually go away completely.
- * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
- *
- * Cleans up all DRM device, calling drm_lastclose().
- */
-void drm_put_dev(struct drm_device *dev)
-{
- DRM_DEBUG("\n");
-
- if (!dev) {
- DRM_ERROR("cleanup called no dev\n");
- return;
- }
-
- drm_dev_unregister(dev);
- drm_dev_unref(dev);
-}
-EXPORT_SYMBOL(drm_put_dev);
-
-void drm_unplug_dev(struct drm_device *dev)
-{
- /* for a USB device */
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
-
- mutex_lock(&drm_global_mutex);
-
- drm_device_set_unplugged(dev);
-
- if (dev->open_count == 0) {
- drm_put_dev(dev);
- }
- mutex_unlock(&drm_global_mutex);
-}
-EXPORT_SYMBOL(drm_unplug_dev);
-
-/*
- * DRM internal mount
- * We want to be able to allocate our own "struct address_space" to control
- * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
- * stand-alone address_space objects, so we need an underlying inode. As there
- * is no way to allocate an independent inode easily, we need a fake internal
- * VFS mount-point.
- *
- * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
- * frees it again. You are allowed to use iget() and iput() to get references to
- * the inode. But each drm_fs_inode_new() call must be paired with exactly one
- * drm_fs_inode_free() call (which does not have to be the last iput()).
- * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
- * between multiple inode-users. You could, technically, call
- * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
- * iput(), but this way you'd end up with a new vfsmount for each inode.
- */
-
-static int drm_fs_cnt;
-static struct vfsmount *drm_fs_mnt;
-
-static const struct dentry_operations drm_fs_dops = {
- .d_dname = simple_dname,
-};
-
-static const struct super_operations drm_fs_sops = {
- .statfs = simple_statfs,
-};
-
-static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- return mount_pseudo(fs_type,
- "drm:",
- &drm_fs_sops,
- &drm_fs_dops,
- 0x010203ff);
-}
-
-static struct file_system_type drm_fs_type = {
- .name = "drm",
- .owner = THIS_MODULE,
- .mount = drm_fs_mount,
- .kill_sb = kill_anon_super,
-};
-
-static struct inode *drm_fs_inode_new(void)
-{
- struct inode *inode;
- int r;
-
- r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
- if (r < 0) {
- DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
- return ERR_PTR(r);
- }
-
- inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
- if (IS_ERR(inode))
- simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
-
- return inode;
-}
-
-static void drm_fs_inode_free(struct inode *inode)
-{
- if (inode) {
- iput(inode);
- simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
- }
-}
-
-/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
- * @parent: Parent device object
- *
- * Allocate and initialize a new DRM device. No device registration is done.
- * Call drm_dev_register() to advertice the device to user space and register it
- * with other core subsystems.
- *
- * The initial ref-count of the object is 1. Use drm_dev_ref() and
- * drm_dev_unref() to take and drop further ref-counts.
- *
- * RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
- */
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
- struct device *parent)
-{
- struct drm_device *dev;
- int ret;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return NULL;
-
- kref_init(&dev->ref);
- dev->dev = parent;
- dev->driver = driver;
-
- INIT_LIST_HEAD(&dev->filelist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- INIT_LIST_HEAD(&dev->vblank_event_list);
-
- spin_lock_init(&dev->buf_lock);
- spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
- mutex_init(&dev->master_mutex);
-
- dev->anon_inode = drm_fs_inode_new();
- if (IS_ERR(dev->anon_inode)) {
- ret = PTR_ERR(dev->anon_inode);
- DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
- goto err_free;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
- if (ret)
- goto err_minors;
- }
-
- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
- ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
- if (ret)
- goto err_minors;
- }
-
- ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
- if (ret)
- goto err_minors;
-
- if (drm_ht_create(&dev->map_hash, 12))
- goto err_minors;
-
- ret = drm_ctxbitmap_init(dev);
- if (ret) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto err_ht;
- }
-
- if (driver->driver_features & DRIVER_GEM) {
- ret = drm_gem_init(dev);
- if (ret) {
- DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
- goto err_ctxbitmap;
- }
- }
-
- return dev;
-
-err_ctxbitmap:
- drm_ctxbitmap_cleanup(dev);
-err_ht:
- drm_ht_remove(&dev->map_hash);
-err_minors:
- drm_minor_free(dev, DRM_MINOR_LEGACY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_minor_free(dev, DRM_MINOR_CONTROL);
- drm_fs_inode_free(dev->anon_inode);
-err_free:
- mutex_destroy(&dev->master_mutex);
- kfree(dev);
- return NULL;
-}
-EXPORT_SYMBOL(drm_dev_alloc);
-
-static void drm_dev_release(struct kref *ref)
-{
- struct drm_device *dev = container_of(ref, struct drm_device, ref);
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_destroy(dev);
-
- drm_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
- drm_fs_inode_free(dev->anon_inode);
-
- drm_minor_free(dev, DRM_MINOR_LEGACY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_minor_free(dev, DRM_MINOR_CONTROL);
-
- mutex_destroy(&dev->master_mutex);
- kfree(dev->unique);
- kfree(dev);
-}
-
-/**
- * drm_dev_ref - Take reference of a DRM device
- * @dev: device to take reference of or NULL
- *
- * This increases the ref-count of @dev by one. You *must* already own a
- * reference when calling this. Use drm_dev_unref() to drop this reference
- * again.
- *
- * This function never fails. However, this function does not provide *any*
- * guarantee whether the device is alive or running. It only provides a
- * reference to the object and the memory associated with it.
- */
-void drm_dev_ref(struct drm_device *dev)
-{
- if (dev)
- kref_get(&dev->ref);
-}
-EXPORT_SYMBOL(drm_dev_ref);
-
-/**
- * drm_dev_unref - Drop reference of a DRM device
- * @dev: device to drop reference of or NULL
- *
- * This decreases the ref-count of @dev by one. The device is destroyed if the
- * ref-count drops to zero.
- */
-void drm_dev_unref(struct drm_device *dev)
-{
- if (dev)
- kref_put(&dev->ref, drm_dev_release);
-}
-EXPORT_SYMBOL(drm_dev_unref);
-
-/**
- * drm_dev_register - Register DRM device
- * @dev: Device to register
- * @flags: Flags passed to the driver's .load() function
- *
- * Register the DRM device @dev with the system, advertise device to user-space
- * and start normal device operation. @dev must be allocated via drm_dev_alloc()
- * previously.
- *
- * Never call this twice on any device!
- *
- * RETURNS:
- * 0 on success, negative error code on failure.
- */
-int drm_dev_register(struct drm_device *dev, unsigned long flags)
-{
- int ret;
-
- mutex_lock(&drm_global_mutex);
-
- ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
- if (ret)
- goto err_minors;
-
- ret = drm_minor_register(dev, DRM_MINOR_RENDER);
- if (ret)
- goto err_minors;
-
- ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
- if (ret)
- goto err_minors;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, flags);
- if (ret)
- goto err_minors;
- }
-
- /* setup grouping for legacy outputs */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_unload;
- }
-
- ret = 0;
- goto out_unlock;
-
-err_unload:
- if (dev->driver->unload)
- dev->driver->unload(dev);
-err_minors:
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
- return ret;
-}
-EXPORT_SYMBOL(drm_dev_register);
-
-/**
- * drm_dev_unregister - Unregister DRM device
- * @dev: Device to unregister
- *
- * Unregister the DRM device from the system. This does the reverse of
- * drm_dev_register() but does not deallocate the device. The caller must call
- * drm_dev_unref() to drop their final reference.
- */
-void drm_dev_unregister(struct drm_device *dev)
-{
- struct drm_map_list *r_list, *list_temp;
-
- drm_lastclose(dev);
-
- if (dev->driver->unload)
- dev->driver->unload(dev);
-
- if (dev->agp)
- drm_pci_agp_destroy(dev);
-
- drm_vblank_cleanup(dev);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
-
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
-}
-EXPORT_SYMBOL(drm_dev_unregister);
-
-/**
- * drm_dev_set_unique - Set the unique name of a DRM device
- * @dev: device of which to set the unique name
- * @fmt: format string for unique name
- *
- * Sets the unique name of a DRM device using the specified format string and
- * a variable list of arguments. Drivers can use this at driver probe time if
- * the unique name of the devices they drive is static.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
-{
- va_list ap;
-
- kfree(dev->unique);
-
- va_start(ap, fmt);
- dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
- va_end(ap);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-EXPORT_SYMBOL(drm_dev_set_unique);
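The lifecycle documented in the removed file is unchanged by the code move; a minimal sketch of a driver using it, where example_driver, example_probe and example_remove are hypothetical names and error handling is kept to the bare minimum:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>

static struct drm_driver example_driver;	/* assumed to be filled in elsewhere */

static int example_probe(struct platform_device *pdev)
{
	struct drm_device *drm;
	int ret;

	/* Refcount starts at 1; nothing is visible to user space yet. */
	drm = drm_dev_alloc(&example_driver, &pdev->dev);
	if (!drm)
		return -ENOMEM;

	ret = drm_dev_register(drm, 0);
	if (ret) {
		drm_dev_unref(drm);
		return ret;
	}

	platform_set_drvdata(pdev, drm);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	/* Reverse of drm_dev_register(), then drop the last reference. */
	drm_dev_unregister(drm);
	drm_dev_unref(drm);
	return 0;
}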
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 369b26278e76..ab1a5f6dde8a 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -438,7 +438,6 @@ err_out_files:
out:
return ret;
}
-EXPORT_SYMBOL(drm_sysfs_connector_add);
/**
* drm_sysfs_connector_remove - remove an connector device from sysfs
@@ -468,7 +467,6 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
device_unregister(connector->kdev);
connector->kdev = NULL;
}
-EXPORT_SYMBOL(drm_sysfs_connector_remove);
/**
* drm_sysfs_hotplug_event - generate a DRM uevent
@@ -495,71 +493,55 @@ static void drm_sysfs_release(struct device *dev)
}
/**
- * drm_sysfs_device_add - adds a class device to sysfs for a character driver
- * @dev: DRM device to be added
- * @head: DRM head in question
+ * drm_sysfs_minor_alloc() - Allocate sysfs device for given minor
+ * @minor: minor to allocate sysfs device for
*
- * Add a DRM device to the DRM's device model class. We use @dev's PCI device
- * as the parent for the Linux device, and make sure it has a file containing
- * the driver we're using (for userspace compatibility).
+ * This allocates a new sysfs device for @minor and returns it. The device is
+ * not registered nor linked. The caller has to use device_add() and
+ * device_del() to register and unregister it.
+ *
+ * Note that dev_get_drvdata() on the new device will return the minor.
+ * However, the device does not hold a ref-count to the minor nor to the
+ * underlying drm_device. This is unproblematic as long as you access the
+ * private data only in sysfs callbacks. device_del() disables those
+ * synchronously, so they cannot be called after you clean up a minor.
*/
-int drm_sysfs_device_add(struct drm_minor *minor)
+struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
- char *minor_str;
+ const char *minor_str;
+ struct device *kdev;
int r;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
- else if (minor->type == DRM_MINOR_RENDER)
- minor_str = "renderD%d";
- else
- minor_str = "card%d";
-
- minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
- if (!minor->kdev) {
- r = -ENOMEM;
- goto error;
- }
-
- device_initialize(minor->kdev);
- minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
- minor->kdev->class = drm_class;
- minor->kdev->type = &drm_sysfs_device_minor;
- minor->kdev->parent = minor->dev->dev;
- minor->kdev->release = drm_sysfs_release;
- dev_set_drvdata(minor->kdev, minor);
-
- r = dev_set_name(minor->kdev, minor_str, minor->index);
+ else if (minor->type == DRM_MINOR_RENDER)
+ minor_str = "renderD%d";
+ else
+ minor_str = "card%d";
+
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev)
+ return ERR_PTR(-ENOMEM);
+
+ device_initialize(kdev);
+ kdev->devt = MKDEV(DRM_MAJOR, minor->index);
+ kdev->class = drm_class;
+ kdev->type = &drm_sysfs_device_minor;
+ kdev->parent = minor->dev->dev;
+ kdev->release = drm_sysfs_release;
+ dev_set_drvdata(kdev, minor);
+
+ r = dev_set_name(kdev, minor_str, minor->index);
if (r < 0)
- goto error;
-
- r = device_add(minor->kdev);
- if (r < 0)
- goto error;
-
- return 0;
+ goto err_free;
-error:
- DRM_ERROR("device create failed %d\n", r);
- put_device(minor->kdev);
- return r;
-}
+ return kdev;
-/**
- * drm_sysfs_device_remove - remove DRM device
- * @dev: DRM device to remove
- *
- * This call unregisters and cleans up a class device that was created with a
- * call to drm_sysfs_device_add()
- */
-void drm_sysfs_device_remove(struct drm_minor *minor)
-{
- if (minor->kdev)
- device_unregister(minor->kdev);
- minor->kdev = NULL;
+err_free:
+ put_device(kdev);
+ return ERR_PTR(r);
}
-
/**
* drm_class_device_register - Register a struct device in the drm class.
*
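A sketch of the calling convention the new allocator implies on the caller's side, assuming the declaration of drm_sysfs_minor_alloc() is visible (it is a DRM-internal interface); example_add_minor_kdev is a made-up name, not the actual registration path:

#include <linux/device.h>
#include <linux/err.h>
#include <drm/drmP.h>

static int example_add_minor_kdev(struct drm_minor *minor)
{
	struct device *kdev;
	int ret;

	/* Allocate the sysfs device; it is neither registered nor linked yet. */
	kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(kdev))
		return PTR_ERR(kdev);

	ret = device_add(kdev);
	if (ret) {
		put_device(kdev);
		return ret;
	}

	minor->kdev = kdev;
	return 0;
}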
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 178d2a9672a8..7f9f6f9e9b7e 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -28,6 +28,7 @@ config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
depends on DRM_EXYNOS && !FB_S3C
select FB_MODE_HELPERS
+ select MFD_SYSCON
help
Choose this option if you want to use Exynos FIMD for DRM.
@@ -52,6 +53,7 @@ config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS
+ select DRM_PANEL
help
This enables support for DP device.
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index a8ffc8c1477b..4f3c7eb2d37d 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
@@ -28,6 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <drm/bridge/ptn3460.h>
#include "exynos_drm_drv.h"
@@ -41,7 +41,7 @@ struct bridge_init {
struct device_node *node;
};
-static int exynos_dp_init_dp(struct exynos_dp_device *dp)
+static void exynos_dp_init_dp(struct exynos_dp_device *dp)
{
exynos_dp_reset(dp);
@@ -58,8 +58,6 @@ static int exynos_dp_init_dp(struct exynos_dp_device *dp)
exynos_dp_init_hpd(dp);
exynos_dp_init_aux(dp);
-
- return 0;
}
static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
@@ -875,10 +873,24 @@ static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
static void exynos_dp_hotplug(struct work_struct *work)
{
struct exynos_dp_device *dp;
- int ret;
dp = container_of(work, struct exynos_dp_device, hotplug_work);
+ if (dp->drm_dev)
+ drm_helper_hpd_irq_event(dp->drm_dev);
+}
+
+static void exynos_dp_commit(struct exynos_drm_display *display)
+{
+ struct exynos_dp_device *dp = display->ctx;
+ int ret;
+
+ /* Keep the panel disabled while we configure video */
+ if (dp->panel) {
+ if (drm_panel_disable(dp->panel))
+ DRM_ERROR("failed to disable the panel\n");
+ }
+
ret = exynos_dp_detect_hpd(dp);
if (ret) {
/* Cable has been disconnected, we're done */
@@ -909,6 +921,12 @@ static void exynos_dp_hotplug(struct work_struct *work)
ret = exynos_dp_config_video(dp);
if (ret)
dev_err(dp->dev, "unable to config video\n");
+
+ /* Safe to enable the panel now */
+ if (dp->panel) {
+ if (drm_panel_enable(dp->panel))
+ DRM_ERROR("failed to enable the panel\n");
+ }
}
static enum drm_connector_status exynos_dp_detect(
@@ -933,15 +951,18 @@ static int exynos_dp_get_modes(struct drm_connector *connector)
struct exynos_dp_device *dp = ctx_from_connector(connector);
struct drm_display_mode *mode;
+ if (dp->panel)
+ return drm_panel_get_modes(dp->panel);
+
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_ERROR("failed to create a new display mode.\n");
return 0;
}
- drm_display_mode_from_videomode(&dp->panel.vm, mode);
- mode->width_mm = dp->panel.width_mm;
- mode->height_mm = dp->panel.height_mm;
+ drm_display_mode_from_videomode(&dp->priv.vm, mode);
+ mode->width_mm = dp->priv.width_mm;
+ mode->height_mm = dp->priv.height_mm;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
@@ -1018,10 +1039,13 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
+ if (dp->panel)
+ ret = drm_panel_attach(dp->panel, &dp->connector);
+
+ return ret;
}
static void exynos_dp_phy_init(struct exynos_dp_device *dp)
@@ -1050,26 +1074,50 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
}
}
-static void exynos_dp_poweron(struct exynos_dp_device *dp)
+static void exynos_dp_poweron(struct exynos_drm_display *display)
{
+ struct exynos_dp_device *dp = display->ctx;
+
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
+ if (dp->panel) {
+ if (drm_panel_prepare(dp->panel)) {
+ DRM_ERROR("failed to setup the panel\n");
+ return;
+ }
+ }
+
clk_prepare_enable(dp->clock);
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
enable_irq(dp->irq);
+ exynos_dp_commit(display);
}
-static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+static void exynos_dp_poweroff(struct exynos_drm_display *display)
{
+ struct exynos_dp_device *dp = display->ctx;
+
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
+ if (dp->panel) {
+ if (drm_panel_disable(dp->panel)) {
+ DRM_ERROR("failed to disable the panel\n");
+ return;
+ }
+ }
+
disable_irq(dp->irq);
flush_work(&dp->hotplug_work);
exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);
+
+ if (dp->panel) {
+ if (drm_panel_unprepare(dp->panel))
+ DRM_ERROR("failed to turnoff the panel\n");
+ }
}
static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
@@ -1078,12 +1126,12 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- exynos_dp_poweron(dp);
+ exynos_dp_poweron(display);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- exynos_dp_poweroff(dp);
+ exynos_dp_poweroff(display);
break;
default:
break;
@@ -1094,6 +1142,7 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
static struct exynos_drm_display_ops exynos_dp_display_ops = {
.create_connector = exynos_dp_create_connector,
.dpms = exynos_dp_dpms,
+ .commit = exynos_dp_commit,
};
static struct exynos_drm_display exynos_dp_display = {
@@ -1201,7 +1250,7 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
- ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm,
+ ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm,
OF_USE_NATIVE_MODE);
if (ret) {
DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
@@ -1215,16 +1264,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
struct resource *res;
- struct exynos_dp_device *dp;
+ struct exynos_dp_device *dp = exynos_dp_display.ctx;
unsigned int irq_flags;
-
int ret = 0;
- dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
- GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
-
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1236,9 +1279,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = exynos_dp_dt_parse_panel(dp);
- if (ret)
- return ret;
+ if (!dp->panel) {
+ ret = exynos_dp_dt_parse_panel(dp);
+ if (ret)
+ return ret;
+ }
dp->clock = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
@@ -1298,7 +1343,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
disable_irq(dp->irq);
dp->drm_dev = drm_dev;
- exynos_dp_display.ctx = dp;
platform_set_drvdata(pdev, &exynos_dp_display);
@@ -1325,6 +1369,9 @@ static const struct component_ops exynos_dp_ops = {
static int exynos_dp_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *panel_node;
+ struct exynos_dp_device *dp;
int ret;
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
@@ -1332,6 +1379,21 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (ret)
return ret;
+ dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+ GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ panel_node = of_parse_phandle(dev->of_node, "panel", 0);
+ if (panel_node) {
+ dp->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!dp->panel)
+ return -EPROBE_DEFER;
+ }
+
+ exynos_dp_display.ctx = dp;
+
ret = component_add(&pdev->dev, &exynos_dp_ops);
if (ret)
exynos_drm_component_del(&pdev->dev,
@@ -1376,6 +1438,7 @@ static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
};
+MODULE_DEVICE_TABLE(of, exynos_dp_match);
struct platform_driver dp_driver = {
.probe = exynos_dp_probe,
@@ -1390,4 +1453,4 @@ struct platform_driver dp_driver = {
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DP Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 02cc4f9ab903..a1aee6931bd7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -149,6 +149,7 @@ struct exynos_dp_device {
struct drm_device *drm_dev;
struct drm_connector connector;
struct drm_encoder *encoder;
+ struct drm_panel *panel;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
@@ -162,7 +163,7 @@ struct exynos_dp_device {
int dpms_mode;
int hpd_gpio;
- struct exynos_drm_panel_info panel;
+ struct exynos_drm_panel_info priv;
};
/* exynos_dp_reg.c */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 9a16dbe121d1..ba9b3d5ed672 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -117,20 +117,7 @@ static struct drm_encoder *exynos_drm_best_encoder(
struct drm_device *dev = connector->dev;
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
- obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
- exynos_connector->encoder_id);
- return NULL;
- }
-
- encoder = obj_to_encoder(obj);
-
- return encoder;
+ return drm_encoder_find(dev, exynos_connector->encoder_id);
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -185,7 +172,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(exynos_connector);
}
@@ -230,7 +217,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
drm_connector_init(dev, connector, &exynos_connector_funcs, type);
drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
- err = drm_sysfs_connector_add(connector);
+ err = drm_connector_register(connector);
if (err)
goto err_connector;
@@ -250,7 +237,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
return connector;
err_sysfs:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
err_connector:
drm_connector_cleanup(connector);
kfree(exynos_connector);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 95c9435d0266..b68e58f78cd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -69,8 +69,10 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
if (mode > DRM_MODE_DPMS_ON) {
/* wait for the completion of page flip. */
- wait_event(exynos_crtc->pending_flip_queue,
- atomic_read(&exynos_crtc->pending_flip) == 0);
+ if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
+ !atomic_read(&exynos_crtc->pending_flip),
+ HZ/20))
+ atomic_set(&exynos_crtc->pending_flip, 0);
drm_vblank_off(crtc->dev, exynos_crtc->pipe);
}
@@ -259,6 +261,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
+ atomic_set(&exynos_crtc->pending_flip, 0);
spin_unlock_irq(&dev->event_lock);
goto out;
@@ -508,3 +511,11 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
return -EPERM;
}
+
+void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->te_handler)
+ manager->ops->te_handler(manager);
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 9f74b10a8a01..690dcddab725 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -36,4 +36,11 @@ void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
unsigned int out_type);
+/*
+ * This function calls the CRTC device (manager)'s te_handler() callback
+ * to trigger the transfer of a video image at the tearing effect
+ * synchronization signal.
+ */
+void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
+
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 9e530f205ad2..fa08f05e3e34 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -48,7 +48,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
static void exynos_dpi_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -117,7 +117,7 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -125,14 +125,18 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
static void exynos_dpi_poweron(struct exynos_dpi *ctx)
{
- if (ctx->panel)
+ if (ctx->panel) {
+ drm_panel_prepare(ctx->panel);
drm_panel_enable(ctx->panel);
+ }
}
static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
{
- if (ctx->panel)
+ if (ctx->panel) {
drm_panel_disable(ctx->panel);
+ drm_panel_unprepare(ctx->panel);
+ }
}
static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ab7d182063c3..0d74e9b99c4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -39,8 +39,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define VBLANK_OFF_DELAY 50000
-
static struct platform_device *exynos_drm_pdev;
static DEFINE_MUTEX(drm_component_lock);
@@ -103,8 +101,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
/* setup possible_clones. */
exynos_drm_encoder_setup(dev);
- drm_vblank_offdelay = VBLANK_OFF_DELAY;
-
platform_set_drvdata(dev->platformdev, dev);
/* Try to bind all sub drivers. */
@@ -362,7 +358,7 @@ static int exynos_drm_sys_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
pm_message_t message;
- if (pm_runtime_suspended(dev))
+ if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
message.event = PM_EVENT_SUSPEND;
@@ -373,7 +369,7 @@ static int exynos_drm_sys_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- if (pm_runtime_suspended(dev))
+ if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
return exynos_drm_resume(drm_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 06cde4506278..69a6fa397d75 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -40,8 +40,6 @@ struct drm_device;
struct exynos_drm_overlay;
struct drm_connector;
-extern unsigned int drm_vblank_offdelay;
-
/* This enumerates device type. */
enum exynos_drm_device_type {
EXYNOS_DEVICE_TYPE_NONE,
@@ -188,6 +186,8 @@ struct exynos_drm_display {
* @win_commit: apply hardware specific overlay data to registers.
* @win_enable: enable hardware specific overlay.
* @win_disable: disable hardware specific overlay.
+ * @te_handler: trigger the transfer of a video image at the tearing
+ * effect synchronization signal if there is a page flip request.
*/
struct exynos_drm_manager;
struct exynos_drm_manager_ops {
@@ -206,6 +206,7 @@ struct exynos_drm_manager_ops {
void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
+ void (*te_handler)(struct exynos_drm_manager *mgr);
};
/*
@@ -236,14 +237,9 @@ struct exynos_drm_g2d_private {
struct list_head userptr_list;
};
-struct exynos_drm_ipp_private {
- struct device *dev;
- struct list_head event_list;
-};
-
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
- struct exynos_drm_ipp_private *ipp_priv;
+ struct device *ipp_dev;
struct file *anon_filp;
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6302aa64f6c1..442aa2d00132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -16,7 +16,10 @@
#include <drm/drm_panel.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
@@ -24,6 +27,7 @@
#include <video/mipi_display.h>
#include <video/videomode.h>
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
/* returns true iff both arguments logically differs */
@@ -54,9 +58,12 @@
/* FIFO memory AC characteristic register */
#define DSIM_PLLCTRL_REG 0x4c /* PLL control register */
-#define DSIM_PLLTMR_REG 0x50 /* PLL timer register */
#define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */
#define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */
+#define DSIM_PHYCTRL_REG 0x5c
+#define DSIM_PHYTIMING_REG 0x64
+#define DSIM_PHYTIMING1_REG 0x68
+#define DSIM_PHYTIMING2_REG 0x6c
/* DSIM_STATUS */
#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
@@ -200,6 +207,24 @@
#define DSIM_PLL_M(x) ((x) << 4)
#define DSIM_PLL_S(x) ((x) << 1)
+/* DSIM_PHYCTRL */
+#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
+
+/* DSIM_PHYTIMING */
+#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
+#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
+
+/* DSIM_PHYTIMING1 */
+#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
+#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
+#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
+#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
+
+/* DSIM_PHYTIMING2 */
+#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
+#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
+#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
+
#define DSI_MAX_BUS_WIDTH 4
#define DSI_NUM_VIRTUAL_CHANNELS 4
#define DSI_TX_FIFO_SIZE 2048
@@ -233,6 +258,12 @@ struct exynos_dsi_transfer {
#define DSIM_STATE_INITIALIZED BIT(1)
#define DSIM_STATE_CMD_LPM BIT(2)
+struct exynos_dsi_driver_data {
+ unsigned int plltmr_reg;
+
+ unsigned int has_freqband:1;
+};
+
struct exynos_dsi {
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
@@ -247,6 +278,7 @@ struct exynos_dsi {
struct clk *bus_clk;
struct regulator_bulk_data supplies[2];
int irq;
+ int te_gpio;
u32 pll_clk_rate;
u32 burst_clk_rate;
@@ -262,11 +294,39 @@ struct exynos_dsi {
spinlock_t transfer_lock; /* protects transfer_list */
struct list_head transfer_list;
+
+ struct exynos_dsi_driver_data *driver_data;
};
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
+static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
+ .plltmr_reg = 0x50,
+ .has_freqband = 1,
+};
+
+static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
+ .plltmr_reg = 0x58,
+};
+
+static struct of_device_id exynos_dsi_of_match[] = {
+ { .compatible = "samsung,exynos4210-mipi-dsi",
+ .data = &exynos4_dsi_driver_data },
+ { .compatible = "samsung,exynos5410-mipi-dsi",
+ .data = &exynos5_dsi_driver_data },
+ { }
+};
+
+static inline struct exynos_dsi_driver_data *exynos_dsi_get_driver_data(
+ struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(exynos_dsi_of_match, &pdev->dev);
+
+ return (struct exynos_dsi_driver_data *)of_id->data;
+}
+
static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
{
if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
@@ -340,14 +400,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
unsigned long freq)
{
- static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
- };
+ struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
unsigned long fin, fout;
- int timeout, band;
+ int timeout;
u8 p, s;
u16 m;
u32 reg;
@@ -368,18 +423,30 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
"failed to find PLL PMS for requested frequency\n");
return -EFAULT;
}
+ dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
- for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
- if (fout < freq_bands[band])
- break;
+ writel(500, dsi->reg_base + driver_data->plltmr_reg);
+
+ reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
- dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout,
- p, m, s, band);
+ if (driver_data->has_freqband) {
+ static const unsigned long freq_bands[] = {
+ 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+ 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+ 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+ 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ };
+ int band;
- writel(500, dsi->reg_base + DSIM_PLLTMR_REG);
+ for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+ if (fout < freq_bands[band])
+ break;
+
+ dev_dbg(dsi->dev, "band %d\n", band);
+
+ reg |= DSIM_FREQ_BAND(band);
+ }
- reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN
- | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
timeout = 1000;
@@ -433,6 +500,59 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
return 0;
}
+static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
+{
+ struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ u32 reg;
+
+ if (driver_data->has_freqband)
+ return;
+
+ /* B D-PHY: D-PHY Master & Slave Analog Block control */
+ reg = DSIM_PHYCTRL_ULPS_EXIT(0x0af);
+ writel(reg, dsi->reg_base + DSIM_PHYCTRL_REG);
+
+ /*
+ * T LPX: Transmitted length of any Low-Power state period
+ * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
+ * burst
+ */
+ reg = DSIM_PHYTIMING_LPX(0x06) | DSIM_PHYTIMING_HS_EXIT(0x0b);
+ writel(reg, dsi->reg_base + DSIM_PHYTIMING_REG);
+
+ /*
+ * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Clock.
+ * T CLK_POST: Time that the transmitter continues to send HS clock
+ * after the last associated Data Lane has transitioned to LP Mode
+ * Interval is defined as the period from the end of T HS-TRAIL to
+ * the beginning of T CLK-TRAIL
+ * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
+ * the last payload clock bit of a HS transmission burst
+ */
+ reg = DSIM_PHYTIMING1_CLK_PREPARE(0x07) |
+ DSIM_PHYTIMING1_CLK_ZERO(0x27) |
+ DSIM_PHYTIMING1_CLK_POST(0x0d) |
+ DSIM_PHYTIMING1_CLK_TRAIL(0x08);
+ writel(reg, dsi->reg_base + DSIM_PHYTIMING1_REG);
+
+ /*
+ * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Sync sequence.
+ * T HS-TRAIL: Time that the transmitter drives the flipped differential
+ * state after last payload data bit of a HS transmission burst
+ */
+ reg = DSIM_PHYTIMING2_HS_PREPARE(0x09) | DSIM_PHYTIMING2_HS_ZERO(0x0d) |
+ DSIM_PHYTIMING2_HS_TRAIL(0x0b);
+ writel(reg, dsi->reg_base + DSIM_PHYTIMING2_REG);
+}
+
static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
{
u32 reg;
@@ -468,13 +588,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
/* DSI configuration */
reg = 0;
+ /*
+ * The first bit of mode_flags specifies the display configuration.
+ * If this bit is set (MIPI_DSI_MODE_VIDEO), the DSI host operates in
+ * video mode; otherwise it operates in command mode.
+ */
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
reg |= DSIM_VIDEO_MODE;
+ /*
+ * The user manual states that the following bits are ignored in
+ * command mode.
+ */
if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
reg |= DSIM_MFLUSH_VS;
- if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
- reg |= DSIM_EOT_DISABLE;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
reg |= DSIM_SYNC_INFORM;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
@@ -491,6 +618,9 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
reg |= DSIM_HSA_MODE;
}
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+ reg |= DSIM_EOT_DISABLE;
+
switch (dsi->format) {
case MIPI_DSI_FMT_RGB888:
reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
@@ -944,17 +1074,90 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
+{
+ struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
+ struct drm_encoder *encoder = dsi->encoder;
+
+ if (dsi->state & DSIM_STATE_ENABLED)
+ exynos_drm_crtc_te_handler(encoder->crtc);
+
+ return IRQ_HANDLED;
+}
+
+static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
+{
+ enable_irq(dsi->irq);
+
+ if (gpio_is_valid(dsi->te_gpio))
+ enable_irq(gpio_to_irq(dsi->te_gpio));
+}
+
+static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
+{
+ if (gpio_is_valid(dsi->te_gpio))
+ disable_irq(gpio_to_irq(dsi->te_gpio));
+
+ disable_irq(dsi->irq);
+}
+
static int exynos_dsi_init(struct exynos_dsi *dsi)
{
- exynos_dsi_enable_clock(dsi);
exynos_dsi_reset(dsi);
- enable_irq(dsi->irq);
+ exynos_dsi_enable_irq(dsi);
+ exynos_dsi_enable_clock(dsi);
exynos_dsi_wait_for_reset(dsi);
+ exynos_dsi_set_phy_ctrl(dsi);
exynos_dsi_init_link(dsi);
return 0;
}
+static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
+{
+ int ret;
+
+ dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+ if (!gpio_is_valid(dsi->te_gpio)) {
+ dev_err(dsi->dev, "no te-gpios specified\n");
+ ret = dsi->te_gpio;
+ goto out;
+ }
+
+ ret = gpio_request_one(dsi->te_gpio, GPIOF_IN, "te_gpio");
+ if (ret) {
+ dev_err(dsi->dev, "gpio request failed with %d\n", ret);
+ goto out;
+ }
+
+	/*
+	 * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because the
+	 * panel driver calls drm_panel_init() first and mipi_dsi_attach()
+	 * later in its probe(). As a result, te_gpio is still invalid when
+	 * exynos_dsi_enable_irq() is called on behalf of drm_panel_init(),
+	 * i.e. before the panel is attached.
+	 */
+ ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio),
+ exynos_dsi_te_irq_handler, NULL,
+ IRQF_TRIGGER_RISING, "TE", dsi);
+ if (ret) {
+ dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
+ gpio_free(dsi->te_gpio);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
+{
+ if (gpio_is_valid(dsi->te_gpio)) {
+ free_irq(gpio_to_irq(dsi->te_gpio), dsi);
+ gpio_free(dsi->te_gpio);
+ dsi->te_gpio = -ENOENT;
+ }
+}
+
static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
@@ -968,6 +1171,19 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
if (dsi->connector.dev)
drm_helper_hpd_irq_event(dsi->connector.dev);
+	/*
+	 * This is a temporary solution and should be replaced with a more
+	 * generic mechanism.
+	 *
+	 * If the attached panel operates in command mode, the DSI host has
+	 * to register a TE interrupt handler.
+	 */
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ int ret = exynos_dsi_register_te_irq(dsi);
+
+ if (ret)
+ return ret;
+ }
+
return 0;
}
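
The TE path above boils down to a small GPIO-to-IRQ pattern; a condensed, hedged sketch (the function name is illustrative, the calls are the same standard legacy GPIO and IRQ APIs used in the hunk):

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/of_gpio.h>

/* Look up "te-gpios" in the panel node, claim it as an input and install a
 * rising-edge handler on the IRQ mapped from that GPIO line. */
static int te_irq_setup(struct device_node *panel, irq_handler_t handler,
			void *cookie)
{
	int gpio, ret;

	gpio = of_get_named_gpio(panel, "te-gpios", 0);
	if (!gpio_is_valid(gpio))
		return gpio;

	ret = gpio_request_one(gpio, GPIOF_IN, "te_gpio");
	if (ret)
		return ret;

	ret = request_threaded_irq(gpio_to_irq(gpio), handler, NULL,
				   IRQF_TRIGGER_RISING, "TE", cookie);
	if (ret)
		gpio_free(gpio);

	return ret;
}
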
@@ -976,6 +1192,8 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
{
struct exynos_dsi *dsi = host_to_dsi(host);
+ exynos_dsi_unregister_te_irq(dsi);
+
dsi->panel_node = NULL;
if (dsi->connector.dev)
@@ -1089,7 +1307,7 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
exynos_dsi_disable_clock(dsi);
- disable_irq(dsi->irq);
+ exynos_dsi_disable_irq(dsi);
}
dsi->state &= ~DSIM_STATE_CMD_LPM;
@@ -1115,7 +1333,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
if (ret < 0)
return ret;
- ret = drm_panel_enable(dsi->panel);
+ ret = drm_panel_prepare(dsi->panel);
if (ret < 0) {
exynos_dsi_poweroff(dsi);
return ret;
@@ -1124,6 +1342,14 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
exynos_dsi_set_display_mode(dsi);
exynos_dsi_set_display_enable(dsi, true);
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_unprepare(dsi->panel);
+ exynos_dsi_poweroff(dsi);
+ return ret;
+ }
+
dsi->state |= DSIM_STATE_ENABLED;
return 0;
@@ -1134,8 +1360,9 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
- exynos_dsi_set_display_enable(dsi, false);
drm_panel_disable(dsi->panel);
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_unprepare(dsi->panel);
exynos_dsi_poweroff(dsi);
dsi->state &= ~DSIM_STATE_ENABLED;
@@ -1246,7 +1473,7 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -1278,6 +1505,7 @@ static struct exynos_drm_display exynos_dsi_display = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.ops = &exynos_dsi_display_ops,
};
+MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
/* of_* functions will be removed after merge of of_graph patches */
static struct device_node *
@@ -1435,6 +1663,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
goto err_del_component;
}
+	/* Mark the TE GPIO as invalid until a panel provides one */
+ dsi->te_gpio = -ENOENT;
+
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
INIT_LIST_HEAD(&dsi->transfer_list);
@@ -1443,6 +1674,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->dsi_host.dev = &pdev->dev;
dsi->dev = &pdev->dev;
+ dsi->driver_data = exynos_dsi_get_driver_data(pdev);
ret = exynos_dsi_parse_dt(dsi);
if (ret)
@@ -1525,11 +1757,6 @@ static int exynos_dsi_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id exynos_dsi_of_match[] = {
- { .compatible = "samsung,exynos4210-mipi-dsi" },
- { }
-};
-
struct platform_driver dsi_driver = {
.probe = exynos_dsi_probe,
.remove = exynos_dsi_remove,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index d771b467cf0c..32e63f60e1d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -225,7 +225,7 @@ out:
return ret;
}
-static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
@@ -266,7 +266,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
return -ENOMEM;
private->fb_helper = helper = &fbdev->drm_fb_helper;
- helper->funcs = &exynos_drm_fb_helper_funcs;
+
+ drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
num_crtc = dev->mode_config.num_crtc;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 831dde9034c6..ec7cc9ea50df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1887,6 +1887,7 @@ static const struct of_device_id fimc_of_match[] = {
{ .compatible = "samsung,exynos4212-fimc" },
{ },
};
+MODULE_DEVICE_TABLE(of, fimc_of_match);
struct platform_driver fimc_driver = {
.probe = fimc_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 33161ad38201..5d09e33fef87 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -20,6 +20,8 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@@ -61,6 +63,24 @@
/* color key value register for hardware window 1 ~ 4. */
#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
+/* I80 / RGB trigger control register */
+#define TRIGCON 0x1A4
+#define TRGMODE_I80_RGB_ENABLE_I80 (1 << 0)
+#define SWTRGCMD_I80_RGB_ENABLE (1 << 1)
+
+/* display mode change control register (not present on Exynos4) */
+#define VIDOUT_CON 0x000
+#define VIDOUT_CON_F_I80_LDI0 (0x2 << 8)
+
+/* I80 interface control registers for the main LDI */
+#define I80IFCONFAx(x) (0x1B0 + (x) * 4)
+#define I80IFCONFBx(x) (0x1B8 + (x) * 4)
+#define LCD_CS_SETUP(x) ((x) << 16)
+#define LCD_WR_SETUP(x) ((x) << 12)
+#define LCD_WR_ACTIVE(x) ((x) << 8)
+#define LCD_WR_HOLD(x) ((x) << 4)
+#define I80IFEN_ENABLE (1 << 0)
+
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
@@ -68,10 +88,14 @@
struct fimd_driver_data {
unsigned int timing_base;
+ unsigned int lcdblk_offset;
+ unsigned int lcdblk_vt_shift;
+ unsigned int lcdblk_bypass_shift;
unsigned int has_shadowcon:1;
unsigned int has_clksel:1;
unsigned int has_limited_fmt:1;
+ unsigned int has_vidoutcon:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -82,12 +106,19 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
+ .lcdblk_offset = 0x210,
+ .lcdblk_vt_shift = 10,
+ .lcdblk_bypass_shift = 1,
.has_shadowcon = 1,
};
static struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
+ .lcdblk_offset = 0x214,
+ .lcdblk_vt_shift = 24,
+ .lcdblk_bypass_shift = 15,
.has_shadowcon = 1,
+ .has_vidoutcon = 1,
};
struct fimd_win_data {
@@ -112,15 +143,22 @@ struct fimd_context {
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
+ struct regmap *sysreg;
struct drm_display_mode mode;
struct fimd_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
+ u32 vidcon0;
u32 vidcon1;
+ u32 vidout_con;
+ u32 i80ifcon;
+ bool i80_if;
bool suspended;
int pipe;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
+ atomic_t win_updated;
+ atomic_t triggering;
struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
@@ -136,6 +174,7 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos5_fimd_driver_data },
{},
};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
@@ -243,6 +282,14 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
u32 clkdiv;
+ if (ctx->i80_if) {
+ /*
+		 * The frame done interrupt should occur before the next
+		 * TE signal.
+ */
+ ideal_clk *= 2;
+ }
+
/* Find the clock divider value that gets us closest to ideal_clk */
clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
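
A rough userspace illustration of the clock-divider math with the I80 doubling applied above; the panel timings and source clock rate are invented for the example, not taken from any board.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long htotal = 480, vtotal = 800, vrefresh = 60;	/* assumed */
	unsigned long lcd_clk = 133000000;				/* assumed source clock */
	unsigned long ideal_clk = htotal * vtotal * vrefresh;

	ideal_clk *= 2;		/* i80: finish the transfer well before the next TE */
	printf("clkdiv = %lu\n", DIV_ROUND_UP(lcd_clk, ideal_clk));
	return 0;
}
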
@@ -271,11 +318,10 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
struct drm_display_mode *mode = &ctx->mode;
- struct fimd_driver_data *driver_data;
- u32 val, clkdiv, vidcon1;
- int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+ struct fimd_driver_data *driver_data = ctx->driver_data;
+ void *timing_base = ctx->regs + driver_data->timing_base;
+ u32 val, clkdiv;
- driver_data = ctx->driver_data;
if (ctx->suspended)
return;
@@ -283,33 +329,65 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
if (mode->htotal == 0 || mode->vtotal == 0)
return;
- /* setup polarity values */
- vidcon1 = ctx->vidcon1;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- vidcon1 |= VIDCON1_INV_VSYNC;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- vidcon1 |= VIDCON1_INV_HSYNC;
- writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
-
- /* setup vertical timing values. */
- vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
- vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
- vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
-
- val = VIDTCON0_VBPD(vbpd - 1) |
- VIDTCON0_VFPD(vfpd - 1) |
- VIDTCON0_VSPW(vsync_len - 1);
- writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
-
- /* setup horizontal timing values. */
- hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
- hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
- hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
-
- val = VIDTCON1_HBPD(hbpd - 1) |
- VIDTCON1_HFPD(hfpd - 1) |
- VIDTCON1_HSPW(hsync_len - 1);
- writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+ if (ctx->i80_if) {
+ val = ctx->i80ifcon | I80IFEN_ENABLE;
+ writel(val, timing_base + I80IFCONFAx(0));
+
+ /* disable auto frame rate */
+ writel(0, timing_base + I80IFCONFBx(0));
+
+ /* set video type selection to I80 interface */
+ if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+ driver_data->lcdblk_offset,
+ 0x3 << driver_data->lcdblk_vt_shift,
+ 0x1 << driver_data->lcdblk_vt_shift)) {
+ DRM_ERROR("Failed to update sysreg for I80 i/f.\n");
+ return;
+ }
+ } else {
+ int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+ u32 vidcon1;
+
+ /* setup polarity values */
+ vidcon1 = ctx->vidcon1;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ vidcon1 |= VIDCON1_INV_VSYNC;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ vidcon1 |= VIDCON1_INV_HSYNC;
+ writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
+
+ /* setup vertical timing values. */
+ vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
+ vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
+
+ val = VIDTCON0_VBPD(vbpd - 1) |
+ VIDTCON0_VFPD(vfpd - 1) |
+ VIDTCON0_VSPW(vsync_len - 1);
+ writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
+
+ /* setup horizontal timing values. */
+ hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
+ hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
+
+ val = VIDTCON1_HBPD(hbpd - 1) |
+ VIDTCON1_HFPD(hfpd - 1) |
+ VIDTCON1_HSPW(hsync_len - 1);
+ writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+ }
+
+ if (driver_data->has_vidoutcon)
+ writel(ctx->vidout_con, timing_base + VIDOUT_CON);
+
+ /* set bypass selection */
+ if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+ driver_data->lcdblk_offset,
+ 0x1 << driver_data->lcdblk_bypass_shift,
+ 0x1 << driver_data->lcdblk_bypass_shift)) {
+ DRM_ERROR("Failed to update sysreg for bypass setting.\n");
+ return;
+ }
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
@@ -322,7 +400,8 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
* fields of register with prefix '_F' would be updated
* at vsync(same as dma start)
*/
- val = VIDCON0_ENVID | VIDCON0_ENVID_F;
+ val = ctx->vidcon0;
+ val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
if (ctx->driver_data->has_clksel)
val |= VIDCON0_CLKSEL_LCD;
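
What the regmap_update_bits() calls above do to the LCDBLK sysreg boils down to a masked read-modify-write; a small userspace model (the shift and mask come from the driver data above, the starting register value is made up):

#include <stdio.h>
#include <stdint.h>

/* Same semantics as regmap_update_bits(map, reg, mask, val) on the value. */
static uint32_t update_bits(uint32_t old, uint32_t mask, uint32_t val)
{
	return (old & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int vt_shift = 10;	/* exynos4 lcdblk_vt_shift; 24 on exynos5 */
	uint32_t reg = 0xffffffff;	/* assumed current sysreg value */

	/* video type selection field = 0x1 (I80 interface) */
	reg = update_bits(reg, 0x3 << vt_shift, 0x1 << vt_shift);
	printf("LCDBLK_CFG = 0x%08x\n", reg);
	return 0;
}
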
@@ -660,6 +739,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
}
win_data->enabled = true;
+
+ if (ctx->i80_if)
+ atomic_set(&ctx->win_updated, 1);
}
static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
@@ -838,6 +920,58 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
}
}
+static void fimd_trigger(struct device *dev)
+{
+ struct exynos_drm_manager *mgr = get_fimd_manager(dev);
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_driver_data *driver_data = ctx->driver_data;
+ void *timing_base = ctx->regs + driver_data->timing_base;
+ u32 reg;
+
+ atomic_set(&ctx->triggering, 1);
+
+ reg = readl(ctx->regs + VIDINTCON0);
+ reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE |
+ VIDINTCON0_INT_SYSMAINCON);
+ writel(reg, ctx->regs + VIDINTCON0);
+
+ reg = readl(timing_base + TRIGCON);
+ reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
+ writel(reg, timing_base + TRIGCON);
+}
+
+static void fimd_te_handler(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+
+	/* Check whether the CRTC has already been detached from the encoder */
+ if (ctx->pipe < 0 || !ctx->drm_dev)
+ return;
+
+	/*
+	 * Skip triggering if a trigger is already in progress; issuing
+	 * multiple trigger requests can reset the panel.
+	 */
+ if (atomic_read(&ctx->triggering))
+ return;
+
+	/*
+	 * If a page flip has been requested, trigger the transfer and handle
+	 * the page flip event so that the current framebuffer is written to
+	 * the panel's GRAM.
+	 */
+ if (atomic_add_unless(&ctx->win_updated, -1, 0))
+ fimd_trigger(ctx->dev);
+
+	/* Wake up the vsync event queue */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+
+ if (!atomic_read(&ctx->triggering))
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ }
+}
+
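
The TE handler above gates the hardware trigger on two atomics: win_updated is consumed at most once per window commit, and triggering suppresses re-entry until the frame-done interrupt clears it. A userspace sketch of the consume step, mimicking atomic_add_unless(&v, -1, 0) rather than using the kernel helper itself:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int win_updated = 0;

/* Add -1 unless the counter is already 0; returns true if it consumed one. */
static bool consume_pending_update(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;	/* one pending update consumed */
	}
	return false;			/* nothing pending, skip the trigger */
}

int main(void)
{
	atomic_store(&win_updated, 1);		/* fimd_win_commit() side */

	if (consume_pending_update(&win_updated))
		printf("TE: trigger transfer to panel GRAM\n");
	if (!consume_pending_update(&win_updated))
		printf("TE: nothing pending, skip\n");
	return 0;
}
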
static struct exynos_drm_manager_ops fimd_manager_ops = {
.dpms = fimd_dpms,
.mode_fixup = fimd_mode_fixup,
@@ -849,6 +983,7 @@ static struct exynos_drm_manager_ops fimd_manager_ops = {
.win_mode_set = fimd_win_mode_set,
.win_commit = fimd_win_commit,
.win_disable = fimd_win_disable,
+ .te_handler = fimd_te_handler,
};
static struct exynos_drm_manager fimd_manager = {
@@ -859,26 +994,40 @@ static struct exynos_drm_manager fimd_manager = {
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
- u32 val;
+ u32 val, clear_bit;
val = readl(ctx->regs + VIDINTCON1);
- if (val & VIDINTCON1_INT_FRAME)
- /* VSYNC interrupt */
- writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
+ if (val & clear_bit)
+ writel(clear_bit, ctx->regs + VIDINTCON1);
/* check the crtc is detached already from encoder */
if (ctx->pipe < 0 || !ctx->drm_dev)
goto out;
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ if (ctx->i80_if) {
+		/* disable the I80 frame done interrupt */
+ val = readl(ctx->regs + VIDINTCON0);
+ val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON);
+ writel(val, ctx->regs + VIDINTCON0);
- /* set wait vsync event to zero and wake up queue. */
- if (atomic_read(&ctx->wait_vsync_event)) {
- atomic_set(&ctx->wait_vsync_event, 0);
- wake_up(&ctx->wait_vsync_queue);
+ /* exit triggering mode */
+ atomic_set(&ctx->triggering, 0);
+
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ } else {
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+ }
}
+
out:
return IRQ_HANDLED;
}
@@ -923,6 +1072,7 @@ static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
+ struct device_node *i80_if_timings;
struct resource *res;
int ret = -EINVAL;
@@ -944,12 +1094,51 @@ static int fimd_probe(struct platform_device *pdev)
ctx->dev = dev;
ctx->suspended = true;
+ ctx->driver_data = drm_fimd_get_driver_data(pdev);
if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
ctx->vidcon1 |= VIDCON1_INV_VDEN;
if (of_property_read_bool(dev->of_node, "samsung,invert-vclk"))
ctx->vidcon1 |= VIDCON1_INV_VCLK;
+ i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
+ if (i80_if_timings) {
+ u32 val;
+
+ ctx->i80_if = true;
+
+ if (ctx->driver_data->has_vidoutcon)
+ ctx->vidout_con |= VIDOUT_CON_F_I80_LDI0;
+ else
+ ctx->vidcon0 |= VIDCON0_VIDOUT_I80_LDI0;
+ /*
+		 * The user manual states that this "DSI_EN" bit is required
+		 * to enable the I80 24-bit data interface.
+ */
+ ctx->vidcon0 |= VIDCON0_DSI_EN;
+
+ if (of_property_read_u32(i80_if_timings, "cs-setup", &val))
+ val = 0;
+ ctx->i80ifcon = LCD_CS_SETUP(val);
+ if (of_property_read_u32(i80_if_timings, "wr-setup", &val))
+ val = 0;
+ ctx->i80ifcon |= LCD_WR_SETUP(val);
+ if (of_property_read_u32(i80_if_timings, "wr-active", &val))
+ val = 1;
+ ctx->i80ifcon |= LCD_WR_ACTIVE(val);
+ if (of_property_read_u32(i80_if_timings, "wr-hold", &val))
+ val = 0;
+ ctx->i80ifcon |= LCD_WR_HOLD(val);
+ }
+ of_node_put(i80_if_timings);
+
+ ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,sysreg");
+ if (IS_ERR(ctx->sysreg)) {
+ dev_warn(dev, "failed to get system register.\n");
+ ctx->sysreg = NULL;
+ }
+
ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
@@ -972,7 +1161,8 @@ static int fimd_probe(struct platform_device *pdev)
goto err_del_component;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ ctx->i80_if ? "lcd_sys" : "vsync");
if (!res) {
dev_err(dev, "irq request failed.\n");
ret = -ENXIO;
@@ -986,7 +1176,6 @@ static int fimd_probe(struct platform_device *pdev)
goto err_del_component;
}
- ctx->driver_data = drm_fimd_get_driver_data(pdev);
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
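
To see where the i80-if-timings properties end up, here is a small userspace sketch composing I80IFCONFAx(0) with the field macros added earlier in this patch; the cs/wr cycle counts are example values only.

#include <stdio.h>
#include <stdint.h>

#define LCD_CS_SETUP(x)		((x) << 16)
#define LCD_WR_SETUP(x)		((x) << 12)
#define LCD_WR_ACTIVE(x)	((x) << 8)
#define LCD_WR_HOLD(x)		((x) << 4)
#define I80IFEN_ENABLE		(1 << 0)

int main(void)
{
	/* example DT: cs-setup = <0>; wr-setup = <0>; wr-active = <1>; wr-hold = <0>; */
	uint32_t i80ifcon = LCD_CS_SETUP(0) | LCD_WR_SETUP(0) |
			    LCD_WR_ACTIVE(1) | LCD_WR_HOLD(0);

	/* value fimd_commit() writes to I80IFCONFAx(0) */
	printf("I80IFCONFA0 = 0x%08x\n", i80ifcon | I80IFEN_ENABLE);
	return 0;
}
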
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 800158714473..df7a77d3eff8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1042,8 +1042,23 @@ err:
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev;
+ struct g2d_data *g2d;
struct drm_exynos_g2d_get_ver *ver = data;
+ if (!g2d_priv)
+ return -ENODEV;
+
+ dev = g2d_priv->dev;
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
ver->major = G2D_HW_MAJOR_VER;
ver->minor = G2D_HW_MINOR_VER;
@@ -1056,7 +1071,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev = g2d_priv->dev;
+ struct device *dev;
struct g2d_data *g2d;
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
@@ -1067,6 +1082,10 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
int size;
int ret;
+ if (!g2d_priv)
+ return -ENODEV;
+
+ dev = g2d_priv->dev;
if (!dev)
return -ENODEV;
@@ -1223,13 +1242,17 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev = g2d_priv->dev;
+ struct device *dev;
struct g2d_data *g2d;
struct drm_exynos_g2d_exec *req = data;
struct g2d_runqueue_node *runqueue_node;
struct list_head *run_cmdlist;
struct list_head *event_list;
+ if (!g2d_priv)
+ return -ENODEV;
+
+ dev = g2d_priv->dev;
if (!dev)
return -ENODEV;
@@ -1544,8 +1567,10 @@ static const struct dev_pm_ops g2d_pm_ops = {
static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" },
+ { .compatible = "samsung,exynos4212-g2d" },
{},
};
+MODULE_DEVICE_TABLE(of, exynos_g2d_match);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 163a054922cb..15db80138382 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -301,7 +301,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, filp, gem_handle);
@@ -310,8 +309,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
return;
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
drm_gem_object_unreference_unlocked(obj);
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index a1888e128f1d..c411399070d6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -129,9 +129,6 @@ void exynos_platform_device_ipp_unregister(void)
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
- if (!ippdrv)
- return -EINVAL;
-
mutex_lock(&exynos_drm_ippdrv_lock);
list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -141,9 +138,6 @@ int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
- if (!ippdrv)
- return -EINVAL;
-
mutex_lock(&exynos_drm_ippdrv_lock);
list_del(&ippdrv->drv_list);
mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -151,20 +145,15 @@ int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
return 0;
}
-static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
- u32 *idp)
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
{
int ret;
- /* do the allocation under our mutexlock */
mutex_lock(lock);
ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
mutex_unlock(lock);
- if (ret < 0)
- return ret;
- *idp = ret;
- return 0;
+ return ret;
}
static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
@@ -178,35 +167,25 @@ static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
void *obj;
- DRM_DEBUG_KMS("id[%d]\n", id);
-
mutex_lock(lock);
-
- /* find object using handle */
obj = idr_find(id_idr, id);
- if (!obj) {
- DRM_ERROR("failed to find object.\n");
- mutex_unlock(lock);
- return ERR_PTR(-ENODEV);
- }
-
mutex_unlock(lock);
return obj;
}
-static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
- enum drm_exynos_ipp_cmd cmd)
+static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
{
- /*
- * check dedicated flag and WB, OUTPUT operation with
- * power on state.
- */
- if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
- !pm_runtime_suspended(ippdrv->dev)))
- return true;
+ if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
+ !pm_runtime_suspended(ippdrv->dev)))
+ return -EBUSY;
- return false;
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property))
+ return -EINVAL;
+
+ return 0;
}
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
@@ -214,62 +193,30 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
{
struct exynos_drm_ippdrv *ippdrv;
u32 ipp_id = property->ipp_id;
-
- DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);
+ int ret;
if (ipp_id) {
- /* find ipp driver using idr */
- ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
- ipp_id);
- if (IS_ERR(ippdrv)) {
- DRM_ERROR("not found ipp%d driver.\n", ipp_id);
- return ippdrv;
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
+ if (!ippdrv) {
+ DRM_DEBUG("ipp%d driver not found\n", ipp_id);
+ return ERR_PTR(-ENODEV);
}
- /*
- * WB, OUTPUT opertion not supported multi-operation.
- * so, make dedicated state at set property ioctl.
- * when ipp driver finished operations, clear dedicated flags.
- */
- if (ipp_check_dedicated(ippdrv, property->cmd)) {
- DRM_ERROR("already used choose device.\n");
- return ERR_PTR(-EBUSY);
- }
-
- /*
- * This is necessary to find correct device in ipp drivers.
- * ipp drivers have different abilities,
- * so need to check property.
- */
- if (ippdrv->check_property &&
- ippdrv->check_property(ippdrv->dev, property)) {
- DRM_ERROR("not support property.\n");
- return ERR_PTR(-EINVAL);
+ ret = ipp_check_driver(ippdrv, property);
+ if (ret < 0) {
+ DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
+ return ERR_PTR(ret);
}
return ippdrv;
} else {
- /*
- * This case is search all ipp driver for finding.
- * user application don't set ipp_id in this case,
- * so ipp subsystem search correct driver in driver list.
- */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- if (ipp_check_dedicated(ippdrv, property->cmd)) {
- DRM_DEBUG_KMS("used device.\n");
- continue;
- }
-
- if (ippdrv->check_property &&
- ippdrv->check_property(ippdrv->dev, property)) {
- DRM_DEBUG_KMS("not support property.\n");
- continue;
- }
-
- return ippdrv;
+ ret = ipp_check_driver(ippdrv, property);
+ if (ret == 0)
+ return ippdrv;
}
- DRM_ERROR("not support ipp driver operations.\n");
+ DRM_DEBUG("cannot find driver suitable for given property.\n");
}
return ERR_PTR(-ENODEV);
@@ -308,8 +255,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_prop_list *prop_list = data;
struct exynos_drm_ippdrv *ippdrv;
@@ -346,10 +292,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
*/
ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
prop_list->ipp_id);
- if (IS_ERR(ippdrv)) {
+ if (!ippdrv) {
DRM_ERROR("not found ipp%d driver.\n",
prop_list->ipp_id);
- return PTR_ERR(ippdrv);
+ return -ENODEV;
}
*prop_list = ippdrv->prop_list;
@@ -432,7 +378,7 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
if (!event_work)
return ERR_PTR(-ENOMEM);
- INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+ INIT_WORK(&event_work->work, ipp_sched_event);
return event_work;
}
@@ -441,8 +387,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_property *property = data;
struct exynos_drm_ippdrv *ippdrv;
@@ -489,19 +434,18 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
if (!c_node)
return -ENOMEM;
- /* create property id */
- ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
- &property->prop_id);
- if (ret) {
+ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
+ if (ret < 0) {
DRM_ERROR("failed to create id.\n");
goto err_clear;
}
+ property->prop_id = ret;
DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
property->prop_id, property->cmd, (int)ippdrv);
/* stored property information and ippdrv in private data */
- c_node->priv = priv;
+ c_node->dev = dev;
c_node->property = *property;
c_node->state = IPP_STATE_IDLE;
@@ -534,7 +478,6 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
INIT_LIST_HEAD(&c_node->mem_list[i]);
INIT_LIST_HEAD(&c_node->event_list);
- list_splice_init(&priv->event_list, &c_node->event_list);
mutex_lock(&ippdrv->cmd_lock);
list_add_tail(&c_node->list, &ippdrv->cmd_list);
mutex_unlock(&ippdrv->cmd_lock);
@@ -577,42 +520,18 @@ static void ipp_clean_cmd_node(struct ipp_context *ctx,
kfree(c_node);
}
-static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
- struct drm_exynos_ipp_property *property = &c_node->property;
- struct drm_exynos_ipp_mem_node *m_node;
- struct list_head *head;
- int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
-
- for_each_ipp_ops(i) {
- /* source/destination memory list */
- head = &c_node->mem_list[i];
-
- /* find memory node entry */
- list_for_each_entry(m_node, head, list) {
- DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
- i ? "dst" : "src", count[i], (int)m_node);
- count[i]++;
- }
+ switch (c_node->property.cmd) {
+ case IPP_CMD_WB:
+ return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
+ case IPP_CMD_OUTPUT:
+ return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
+ case IPP_CMD_M2M:
+ default:
+ return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
+ !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
}
-
- DRM_DEBUG_KMS("min[%d]max[%d]\n",
- min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
- max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
-
- /*
- * M2M operations should be need paired memory address.
- * so, need to check minimum count about src, dst.
- * other case not use paired memory, so use maximum count
- */
- if (ipp_is_m2m_cmd(property->cmd))
- ret = min(count[EXYNOS_DRM_OPS_SRC],
- count[EXYNOS_DRM_OPS_DST]);
- else
- ret = max(count[EXYNOS_DRM_OPS_SRC],
- count[EXYNOS_DRM_OPS_DST]);
-
- return ret;
}
static struct drm_exynos_ipp_mem_node
@@ -683,16 +602,14 @@ static struct drm_exynos_ipp_mem_node
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_mem_node *m_node;
- struct drm_exynos_ipp_buf_info buf_info;
- void *addr;
+ struct drm_exynos_ipp_buf_info *buf_info;
int i;
m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
if (!m_node)
return ERR_PTR(-ENOMEM);
- /* clear base address for error handling */
- memset(&buf_info, 0x0, sizeof(buf_info));
+ buf_info = &m_node->buf_info;
/* operations, buffer id */
m_node->ops_id = qbuf->ops_id;
@@ -707,6 +624,8 @@ static struct drm_exynos_ipp_mem_node
/* get dma address by handle */
if (qbuf->handle[i]) {
+ dma_addr_t *addr;
+
addr = exynos_drm_gem_get_dma_addr(drm_dev,
qbuf->handle[i], file);
if (IS_ERR(addr)) {
@@ -714,15 +633,14 @@ static struct drm_exynos_ipp_mem_node
goto err_clear;
}
- buf_info.handles[i] = qbuf->handle[i];
- buf_info.base[i] = *(dma_addr_t *) addr;
- DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
- i, buf_info.base[i], (int)buf_info.handles[i]);
+ buf_info->handles[i] = qbuf->handle[i];
+ buf_info->base[i] = *addr;
+ DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
+ buf_info->base[i], buf_info->handles[i]);
}
}
m_node->filp = file;
- m_node->buf_info = buf_info;
mutex_lock(&c_node->mem_lock);
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
@@ -930,8 +848,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_queue_buf *qbuf = data;
struct drm_exynos_ipp_cmd_node *c_node;
@@ -955,9 +872,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id);
- if (IS_ERR(c_node)) {
+ if (!c_node) {
DRM_ERROR("failed to get command node.\n");
- return PTR_ERR(c_node);
+ return -ENODEV;
}
/* buffer control */
@@ -1062,9 +979,8 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
struct drm_exynos_ipp_cmd_work *cmd_work;
@@ -1091,9 +1007,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id);
- if (IS_ERR(c_node)) {
+ if (!c_node) {
DRM_ERROR("invalid command node list.\n");
- return PTR_ERR(c_node);
+ return -ENODEV;
}
if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
@@ -1198,7 +1114,6 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
/* reset h/w block */
if (ippdrv->reset &&
ippdrv->reset(ippdrv->dev)) {
- DRM_ERROR("failed to reset.\n");
return -EINVAL;
}
@@ -1216,30 +1131,24 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
/* set format */
if (ops->set_fmt) {
ret = ops->set_fmt(ippdrv->dev, config->fmt);
- if (ret) {
- DRM_ERROR("not support format.\n");
+ if (ret)
return ret;
- }
}
/* set transform for rotation, flip */
if (ops->set_transf) {
ret = ops->set_transf(ippdrv->dev, config->degree,
config->flip, &swap);
- if (ret) {
- DRM_ERROR("not support tranf.\n");
- return -EINVAL;
- }
+ if (ret)
+ return ret;
}
/* set size */
if (ops->set_size) {
ret = ops->set_size(ippdrv->dev, swap, &config->pos,
&config->sz);
- if (ret) {
- DRM_ERROR("not support size.\n");
+ if (ret)
return ret;
- }
}
}
@@ -1283,11 +1192,6 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- if (!m_node) {
- DRM_ERROR("failed to get node.\n");
- ret = -EFAULT;
- goto err_unlock;
- }
DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
@@ -1545,11 +1449,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- if (!m_node) {
- DRM_ERROR("empty memory node.\n");
- ret = -ENOMEM;
- goto err_mem_unlock;
- }
tbuf_id[i] = m_node->buf_id;
DRM_DEBUG_KMS("%s buf_id[%d]\n",
@@ -1586,11 +1485,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- if (!m_node) {
- DRM_ERROR("empty memory node.\n");
- ret = -ENOMEM;
- goto err_mem_unlock;
- }
tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
@@ -1704,21 +1598,17 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
/* get ipp driver entry */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- u32 ipp_id;
-
ippdrv->drm_dev = drm_dev;
- ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
- &ipp_id);
- if (ret || ipp_id == 0) {
+ ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
+ if (ret < 0) {
DRM_ERROR("failed to create id.\n");
goto err;
}
+ ippdrv->prop_list.ipp_id = ret;
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
- count++, (int)ippdrv, ipp_id);
-
- ippdrv->prop_list.ipp_id = ipp_id;
+ count++, (int)ippdrv, ret);
/* store parent device for node */
ippdrv->parent_dev = dev;
@@ -1776,17 +1666,10 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- priv->dev = dev;
- file_priv->ipp_priv = priv;
- INIT_LIST_HEAD(&priv->event_list);
+ file_priv->ipp_dev = dev;
- DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);
+ DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
return 0;
}
@@ -1795,13 +1678,12 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
int count = 0;
- DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
+ DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
mutex_lock(&ippdrv->cmd_lock);
@@ -1810,7 +1692,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
count++, (int)ippdrv);
- if (c_node->priv == priv) {
+ if (c_node->dev == file_priv->ipp_dev) {
/*
* userland goto unnormal state. process killed.
* and close the file.
@@ -1832,7 +1714,6 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
mutex_unlock(&ippdrv->cmd_lock);
}
- kfree(priv);
return;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 7aaeaae757c2..6f48d62aeb30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,7 @@ struct drm_exynos_ipp_cmd_work {
/*
* A structure of command node.
*
- * @priv: IPP private information.
+ * @dev: IPP device.
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
@@ -64,7 +64,7 @@ struct drm_exynos_ipp_cmd_work {
* @state: state of command node.
*/
struct drm_exynos_ipp_cmd_node {
- struct exynos_drm_ipp_private *priv;
+ struct device *dev;
struct list_head list;
struct list_head event_list;
struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index f01fbb6dc1f0..55af6b41c1df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -691,6 +691,7 @@ static const struct of_device_id exynos_rotator_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, exynos_rotator_match);
static int rotator_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 2fb8705d6461..9528d81d8004 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -562,7 +562,7 @@ static int vidi_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index aa259b0a873a..562966db2aa1 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -84,6 +84,7 @@ struct hdmi_resources {
struct clk *sclk_hdmiphy;
struct clk *mout_hdmi;
struct regulator_bulk_data *regul_bulk;
+ struct regulator *reg_hdmi_en;
int regul_count;
};
@@ -592,6 +593,13 @@ static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
.is_apb_phy = 0,
};
+static struct hdmi_driver_data exynos4210_hdmi_driver_data = {
+ .type = HDMI_TYPE13,
+ .phy_confs = hdmiphy_v13_configs,
+ .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs),
+ .is_apb_phy = 0,
+};
+
static struct hdmi_driver_data exynos5_hdmi_driver_data = {
.type = HDMI_TYPE14,
.phy_confs = hdmiphy_v13_configs,
@@ -1129,7 +1137,7 @@ static int hdmi_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -1241,14 +1249,13 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
static void hdmi_audio_init(struct hdmi_context *hdata)
{
- u32 sample_rate, bits_per_sample, frame_size_code;
+ u32 sample_rate, bits_per_sample;
u32 data_num, bit_ch, sample_frq;
u32 val;
u8 acr[7];
sample_rate = 44100;
bits_per_sample = 16;
- frame_size_code = 0;
switch (bits_per_sample) {
case 20:
@@ -2168,7 +2175,6 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
struct device *dev = hdata->dev;
struct hdmi_resources *res = &hdata->res;
static char *supply[] = {
- "hdmi-en",
"vdd",
"vdd_osc",
"vdd_pll",
@@ -2228,6 +2234,20 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
}
res->regul_count = ARRAY_SIZE(supply);
+ res->reg_hdmi_en = devm_regulator_get(dev, "hdmi-en");
+ if (IS_ERR(res->reg_hdmi_en) && PTR_ERR(res->reg_hdmi_en) != -ENOENT) {
+ DRM_ERROR("failed to get hdmi-en regulator\n");
+ return PTR_ERR(res->reg_hdmi_en);
+ }
+ if (!IS_ERR(res->reg_hdmi_en)) {
+ ret = regulator_enable(res->reg_hdmi_en);
+ if (ret) {
+ DRM_ERROR("failed to enable hdmi-en regulator\n");
+ return ret;
+ }
+ } else
+ res->reg_hdmi_en = NULL;
+
return ret;
fail:
DRM_ERROR("HDMI resource init - failed\n");
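
The hdmi-en handling above follows an optional-supply pattern: -ENOENT means the board simply does not describe such a regulator and is not treated as an error. A hedged kernel-style sketch of that shape (the function name is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

/* Get and enable an optional "hdmi-en" supply; *out stays NULL if absent. */
static int get_optional_hdmi_en(struct device *dev, struct regulator **out)
{
	struct regulator *reg = devm_regulator_get(dev, "hdmi-en");

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) != -ENOENT)
			return PTR_ERR(reg);	/* real failure */
		*out = NULL;			/* supply simply not wired up */
		return 0;
	}

	*out = reg;
	return regulator_enable(reg);
}
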
@@ -2263,6 +2283,9 @@ static struct of_device_id hdmi_match_types[] = {
.compatible = "samsung,exynos5-hdmi",
.data = &exynos5_hdmi_driver_data,
}, {
+ .compatible = "samsung,exynos4210-hdmi",
+ .data = &exynos4210_hdmi_driver_data,
+ }, {
.compatible = "samsung,exynos4212-hdmi",
.data = &exynos4212_hdmi_driver_data,
}, {
@@ -2272,6 +2295,7 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
+MODULE_DEVICE_TABLE(of, hdmi_match_types);
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
@@ -2494,7 +2518,11 @@ static int hdmi_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&hdata->hotplug_work);
- put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->res.reg_hdmi_en)
+ regulator_disable(hdata->res.reg_hdmi_en);
+
+ if (hdata->hdmiphy_port)
+ put_device(&hdata->hdmiphy_port->dev);
put_device(&hdata->ddc_adpt->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7529946d0a74..e8b4ec84b312 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -76,7 +76,7 @@ struct mixer_resources {
struct clk *vp;
struct clk *sclk_mixer;
struct clk *sclk_hdmi;
- struct clk *sclk_dac;
+ struct clk *mout_mixer;
};
enum mixer_version_id {
@@ -93,6 +93,7 @@ struct mixer_context {
bool interlace;
bool powered;
bool vp_enabled;
+ bool has_sclk;
u32 int_en;
struct mutex mixer_mutex;
@@ -106,6 +107,7 @@ struct mixer_context {
struct mixer_drv_data {
enum mixer_version_id version;
bool is_vp_enabled;
+ bool has_sclk;
};
static const u8 filter_y_horiz_tap8[] = {
@@ -363,6 +365,11 @@ static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
mixer_reg_writemask(res, MXR_CFG, val,
MXR_CFG_VP_ENABLE);
+
+ /* control blending of graphic layer 0 */
+ mixer_reg_writemask(res, MXR_GRAPHIC_CFG(0), val,
+ MXR_GRP_CFG_BLEND_PRE_MUL |
+ MXR_GRP_CFG_PIXEL_BLEND_EN);
}
break;
}
@@ -809,19 +816,23 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
dev_err(dev, "failed to get clock 'vp'\n");
return -ENODEV;
}
- mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
- if (IS_ERR(mixer_res->sclk_mixer)) {
- dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- return -ENODEV;
- }
- mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
- if (IS_ERR(mixer_res->sclk_dac)) {
- dev_err(dev, "failed to get clock 'sclk_dac'\n");
- return -ENODEV;
- }
- if (mixer_res->sclk_hdmi)
- clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+ if (mixer_ctx->has_sclk) {
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+ if (IS_ERR(mixer_res->sclk_mixer)) {
+ dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+ return -ENODEV;
+ }
+ mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer");
+ if (IS_ERR(mixer_res->mout_mixer)) {
+ dev_err(dev, "failed to get clock 'mout_mixer'\n");
+ return -ENODEV;
+ }
+
+ if (mixer_res->sclk_hdmi && mixer_res->mout_mixer)
+ clk_set_parent(mixer_res->mout_mixer,
+ mixer_res->sclk_hdmi);
+ }
res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
@@ -1082,7 +1093,8 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
clk_prepare_enable(res->mixer);
if (ctx->vp_enabled) {
clk_prepare_enable(res->vp);
- clk_prepare_enable(res->sclk_mixer);
+ if (ctx->has_sclk)
+ clk_prepare_enable(res->sclk_mixer);
}
mutex_lock(&ctx->mixer_mutex);
@@ -1121,7 +1133,8 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
clk_disable_unprepare(res->mixer);
if (ctx->vp_enabled) {
clk_disable_unprepare(res->vp);
- clk_disable_unprepare(res->sclk_mixer);
+ if (ctx->has_sclk)
+ clk_disable_unprepare(res->sclk_mixer);
}
pm_runtime_put_sync(ctx->dev);
@@ -1189,9 +1202,15 @@ static struct mixer_drv_data exynos5250_mxr_drv_data = {
.is_vp_enabled = 0,
};
+static struct mixer_drv_data exynos4212_mxr_drv_data = {
+ .version = MXR_VER_0_0_0_16,
+ .is_vp_enabled = 1,
+};
+
static struct mixer_drv_data exynos4210_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
+ .has_sclk = 1,
};
static struct platform_device_id mixer_driver_types[] = {
@@ -1208,6 +1227,12 @@ static struct platform_device_id mixer_driver_types[] = {
static struct of_device_id mixer_match_types[] = {
{
+ .compatible = "samsung,exynos4210-mixer",
+ .data = &exynos4210_mxr_drv_data,
+ }, {
+ .compatible = "samsung,exynos4212-mixer",
+ .data = &exynos4212_mxr_drv_data,
+ }, {
.compatible = "samsung,exynos5-mixer",
.data = &exynos5250_mxr_drv_data,
}, {
@@ -1220,6 +1245,7 @@ static struct of_device_id mixer_match_types[] = {
/* end node */
}
};
+MODULE_DEVICE_TABLE(of, mixer_match_types);
static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
@@ -1251,6 +1277,7 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
ctx->pdev = pdev;
ctx->dev = dev;
ctx->vp_enabled = drv->is_vp_enabled;
+ ctx->has_sclk = drv->has_sclk;
ctx->mxr_ver = drv->version;
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index c18268cd516e..248c33a35ebf 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -192,7 +192,7 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
psb_intel_i2c_destroy(gma_encoder->ddc_bus);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -304,7 +304,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
drm_connector_helper_add(connector,
&cdv_intel_crt_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_ddc:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 9ff30c2efadb..a4cc0e60a1be 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1713,7 +1713,7 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
}
}
i2c_del_adapter(&intel_dp->adapter);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1847,7 +1847,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
/* Set up the DDC bus. */
switch (output_reg) {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index b99084b3f706..4268bf210034 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -248,7 +248,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
if (gma_encoder->i2c_bus)
psb_intel_i2c_destroy(gma_encoder->i2c_bus);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -356,7 +356,7 @@ void cdv_hdmi_init(struct drm_device *dev,
hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
hdmi_priv->dev = dev;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_ddc:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8ecc920fc26d..0b770396548c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -446,7 +446,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector)
if (gma_encoder->i2c_bus)
psb_intel_i2c_destroy(gma_encoder->i2c_bus);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -774,7 +774,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
out:
mutex_unlock(&dev->mode_config.mutex);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_find:
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index e7fcc148f333..d0dd3bea8aa5 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -561,7 +561,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
return psbfb_create(psb_fbdev, sizes);
}
-static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
.gamma_set = psbfb_gamma_set,
.gamma_get = psbfb_gamma_get,
.fb_probe = psbfb_probe,
@@ -600,7 +600,8 @@ int psb_fbdev_init(struct drm_device *dev)
}
dev_priv->fbdev = fbdev;
- fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+ drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
INTELFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 592d205a0089..ce015db59dc6 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
WARN_ON(gt->pages);
- pages = drm_gem_get_pages(&gt->gem, 0);
+ pages = drm_gem_get_pages(&gt->gem);
if (IS_ERR(pages))
return PTR_ERR(pages);
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 6e91b20ce2e5..abf2248da61e 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -318,7 +318,7 @@ static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
if (!dsi_connector)
return;
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
sender = dsi_connector->pkg_sender;
mdfld_dsi_pkg_sender_destroy(sender);
@@ -597,7 +597,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
dsi_config->encoder = encoder;
encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
INTEL_OUTPUT_MIPI2;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
/*TODO: add code to destroy outputs on error*/
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index a97e38e284fa..d75ecb3bdee7 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -39,7 +39,6 @@ static void mid_get_fuse_settings(struct drm_device *dev)
#define FB_REG06 0xD0810600
#define FB_MIPI_DISABLE (1 << 11)
#define FB_REG09 0xD0810900
-#define FB_REG09 0xD0810900
#define FB_SKU_MASK 0x7000
#define FB_SKU_SHIFT 12
#define FB_SKU_100 0
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index cf018ddcc5a6..e6f5c620a0a2 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -665,7 +665,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
dev_info(dev->dev, "HDMI initialised.\n");
return;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 9b099468a5db..0d39da6e8b7a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -404,7 +404,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
out:
mutex_unlock(&dev->mode_config.mutex);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_find:
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index d7778d0472c1..88aad95bde09 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -563,7 +563,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector)
if (lvds_priv->ddc_bus)
psb_intel_i2c_destroy(lvds_priv->ddc_bus);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -829,7 +829,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
*/
out:
mutex_unlock(&dev->mode_config.mutex);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_find:
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index deeb0829b129..0be96fdb5e28 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1682,7 +1682,7 @@ static void psb_intel_sdvo_destroy(struct drm_connector *connector)
psb_intel_sdvo_connector->tv_format);
psb_intel_sdvo_destroy_enhance_property(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -2071,7 +2071,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
gma_connector_attach_encoder(&connector->base, &encoder->base);
- drm_sysfs_connector_add(&connector->base.base);
+ drm_connector_register(&connector->base.base);
}
static void
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index ac357b02bd35..d4762799351d 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -15,8 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-
-
+#include <linux/component.h>
#include <linux/hdmi.h>
#include <linux/module.h>
#include <linux/irq.h>
@@ -730,12 +729,9 @@ tda998x_configure_audio(struct tda998x_priv *priv,
/* DRM encoder functions */
-static void
-tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
+static void tda998x_encoder_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
- struct tda998x_encoder_params *p = params;
-
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
(p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
VIP_CNTRL_0_SWAP_B(p->swap_b) |
@@ -752,11 +748,8 @@ tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
priv->params = *p;
}
-static void
-tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tda998x_encoder_dpms(struct tda998x_priv *priv, int mode)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
/* we only care about on or off: */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
@@ -806,9 +799,8 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static int
-tda998x_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int tda998x_encoder_mode_valid(struct tda998x_priv *priv,
+ struct drm_display_mode *mode)
{
if (mode->clock > 150000)
return MODE_CLOCK_HIGH;
@@ -820,11 +812,10 @@ tda998x_encoder_mode_valid(struct drm_encoder *encoder,
}
static void
-tda998x_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+tda998x_encoder_mode_set(struct tda998x_priv *priv,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint16_t ref_pix, ref_line, n_pix, n_line;
uint16_t hs_pix_s, hs_pix_e;
uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
@@ -1012,20 +1003,16 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
static enum drm_connector_status
-tda998x_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+tda998x_encoder_detect(struct tda998x_priv *priv)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV);
return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
connector_status_disconnected;
}
-static int
-read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
+static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint8_t offset, segptr;
int ret, i;
@@ -1079,10 +1066,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
return 0;
}
-static uint8_t *
-do_get_edid(struct drm_encoder *encoder)
+static uint8_t *do_get_edid(struct tda998x_priv *priv)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
int j, valid_extensions = 0;
uint8_t *block, *new;
bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -1094,7 +1079,7 @@ do_get_edid(struct drm_encoder *encoder)
reg_clear(priv, REG_TX4, TX4_PD_RAM);
/* base block fetch */
- if (read_edid_block(encoder, block, 0))
+ if (read_edid_block(priv, block, 0))
goto fail;
if (!drm_edid_block_valid(block, 0, print_bad_edid))
@@ -1111,7 +1096,7 @@ do_get_edid(struct drm_encoder *encoder)
for (j = 1; j <= block[0x7e]; j++) {
uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
- if (read_edid_block(encoder, ext_block, j))
+ if (read_edid_block(priv, ext_block, j))
goto fail;
if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
@@ -1144,11 +1129,10 @@ fail:
}
static int
-tda998x_encoder_get_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
+tda998x_encoder_get_modes(struct tda998x_priv *priv,
+ struct drm_connector *connector)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
- struct edid *edid = (struct edid *)do_get_edid(encoder);
+ struct edid *edid = (struct edid *)do_get_edid(priv);
int n = 0;
if (edid) {
@@ -1161,18 +1145,14 @@ tda998x_encoder_get_modes(struct drm_encoder *encoder,
return n;
}
-static int
-tda998x_encoder_create_resources(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
+ struct drm_connector *connector)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
if (priv->hdmi->irq)
connector->polled = DRM_CONNECTOR_POLL_HPD;
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
- return 0;
}
static int
@@ -1185,66 +1165,97 @@ tda998x_encoder_set_property(struct drm_encoder *encoder,
return 0;
}
-static void
-tda998x_encoder_destroy(struct drm_encoder *encoder)
+static void tda998x_destroy(struct tda998x_priv *priv)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
if (priv->hdmi->irq)
free_irq(priv->hdmi->irq, priv);
- if (priv->cec)
- i2c_unregister_device(priv->cec);
+ i2c_unregister_device(priv->cec);
+}
+
+/* Slave encoder support */
+
+static void
+tda998x_encoder_slave_set_config(struct drm_encoder *encoder, void *params)
+{
+ tda998x_encoder_set_config(to_tda998x_priv(encoder), params);
+}
+
+static void tda998x_encoder_slave_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ tda998x_destroy(priv);
drm_i2c_encoder_destroy(encoder);
kfree(priv);
}
-static struct drm_encoder_slave_funcs tda998x_encoder_funcs = {
- .set_config = tda998x_encoder_set_config,
- .destroy = tda998x_encoder_destroy,
- .dpms = tda998x_encoder_dpms,
- .save = tda998x_encoder_save,
- .restore = tda998x_encoder_restore,
- .mode_fixup = tda998x_encoder_mode_fixup,
- .mode_valid = tda998x_encoder_mode_valid,
- .mode_set = tda998x_encoder_mode_set,
- .detect = tda998x_encoder_detect,
- .get_modes = tda998x_encoder_get_modes,
- .create_resources = tda998x_encoder_create_resources,
- .set_property = tda998x_encoder_set_property,
-};
+static void tda998x_encoder_slave_dpms(struct drm_encoder *encoder, int mode)
+{
+ tda998x_encoder_dpms(to_tda998x_priv(encoder), mode);
+}
-/* I2C driver functions */
+static int tda998x_encoder_slave_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ return tda998x_encoder_mode_valid(to_tda998x_priv(encoder), mode);
+}
-static int
-tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static void
+tda998x_encoder_slave_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- return 0;
+ tda998x_encoder_mode_set(to_tda998x_priv(encoder), mode, adjusted_mode);
+}
+
+static enum drm_connector_status
+tda998x_encoder_slave_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return tda998x_encoder_detect(to_tda998x_priv(encoder));
+}
+
+static int tda998x_encoder_slave_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return tda998x_encoder_get_modes(to_tda998x_priv(encoder), connector);
}
static int
-tda998x_remove(struct i2c_client *client)
+tda998x_encoder_slave_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
+ tda998x_encoder_set_polling(to_tda998x_priv(encoder), connector);
return 0;
}
-static int
-tda998x_encoder_init(struct i2c_client *client,
- struct drm_device *dev,
- struct drm_encoder_slave *encoder_slave)
+static struct drm_encoder_slave_funcs tda998x_encoder_slave_funcs = {
+ .set_config = tda998x_encoder_slave_set_config,
+ .destroy = tda998x_encoder_slave_destroy,
+ .dpms = tda998x_encoder_slave_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .mode_valid = tda998x_encoder_slave_mode_valid,
+ .mode_set = tda998x_encoder_slave_mode_set,
+ .detect = tda998x_encoder_slave_detect,
+ .get_modes = tda998x_encoder_slave_get_modes,
+ .create_resources = tda998x_encoder_slave_create_resources,
+ .set_property = tda998x_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
{
- struct tda998x_priv *priv;
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
@@ -1252,17 +1263,11 @@ tda998x_encoder_init(struct i2c_client *client,
priv->current_page = 0xff;
priv->hdmi = client;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
- if (!priv->cec) {
- kfree(priv);
+ if (!priv->cec)
return -ENODEV;
- }
- priv->encoder = &encoder_slave->base;
priv->dpms = DRM_MODE_DPMS_OFF;
- encoder_slave->slave_priv = priv;
- encoder_slave->slave_funcs = &tda998x_encoder_funcs;
-
/* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1365,12 +1370,231 @@ fail:
*/
if (priv->cec)
i2c_unregister_device(priv->cec);
- kfree(priv);
- encoder_slave->slave_priv = NULL;
- encoder_slave->slave_funcs = NULL;
return -ENXIO;
}
+static int tda998x_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder_slave)
+{
+ struct tda998x_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->encoder = &encoder_slave->base;
+
+ ret = tda998x_create(client, priv);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+
+ encoder_slave->slave_priv = priv;
+ encoder_slave->slave_funcs = &tda998x_encoder_slave_funcs;
+
+ return 0;
+}
+
+struct tda998x_priv2 {
+ struct tda998x_priv base;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+#define conn_to_tda998x_priv2(x) \
+ container_of(x, struct tda998x_priv2, connector);
+
+#define enc_to_tda998x_priv2(x) \
+ container_of(x, struct tda998x_priv2, encoder);
+
+static void tda998x_encoder2_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+
+ tda998x_encoder_dpms(&priv->base, mode);
+}
+
+static void tda998x_encoder_prepare(struct drm_encoder *encoder)
+{
+ tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void tda998x_encoder_commit(struct drm_encoder *encoder)
+{
+ tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void tda998x_encoder2_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+
+ tda998x_encoder_mode_set(&priv->base, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
+ .dpms = tda998x_encoder2_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .prepare = tda998x_encoder_prepare,
+ .commit = tda998x_encoder_commit,
+ .mode_set = tda998x_encoder2_mode_set,
+};
+
+static void tda998x_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+
+ tda998x_destroy(&priv->base);
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs tda998x_encoder_funcs = {
+ .destroy = tda998x_encoder_destroy,
+};
+
+static int tda998x_connector_get_modes(struct drm_connector *connector)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return tda998x_encoder_get_modes(&priv->base, connector);
+}
+
+static int tda998x_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return tda998x_encoder_mode_valid(&priv->base, mode);
+}
+
+static struct drm_encoder *
+tda998x_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return &priv->encoder;
+}
+
+static
+const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
+ .get_modes = tda998x_connector_get_modes,
+ .mode_valid = tda998x_connector_mode_valid,
+ .best_encoder = tda998x_connector_best_encoder,
+};
+
+static enum drm_connector_status
+tda998x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return tda998x_encoder_detect(&priv->base);
+}
+
+static void tda998x_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tda998x_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = tda998x_connector_detect,
+ .destroy = tda998x_connector_destroy,
+};
+
+static int tda998x_bind(struct device *dev, struct device *master, void *data)
+{
+ struct tda998x_encoder_params *params = dev->platform_data;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct drm_device *drm = data;
+ struct tda998x_priv2 *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->base.encoder = &priv->encoder;
+ priv->connector.interlace_allowed = 1;
+ priv->encoder.possible_crtcs = 1 << 0;
+
+ ret = tda998x_create(client, &priv->base);
+ if (ret)
+ return ret;
+
+ if (!dev->of_node && params)
+ tda998x_encoder_set_config(&priv->base, params);
+
+ tda998x_encoder_set_polling(&priv->base, &priv->connector);
+
+ drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
+ ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ goto err_encoder;
+
+ drm_connector_helper_add(&priv->connector,
+ &tda998x_connector_helper_funcs);
+ ret = drm_connector_init(drm, &priv->connector,
+ &tda998x_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret)
+ goto err_connector;
+
+ ret = drm_connector_register(&priv->connector);
+ if (ret)
+ goto err_sysfs;
+
+ priv->connector.encoder = &priv->encoder;
+ drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+
+ return 0;
+
+err_sysfs:
+ drm_connector_cleanup(&priv->connector);
+err_connector:
+ drm_encoder_cleanup(&priv->encoder);
+err_encoder:
+ tda998x_destroy(&priv->base);
+ return ret;
+}
+
+static void tda998x_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct tda998x_priv2 *priv = dev_get_drvdata(dev);
+
+ drm_connector_cleanup(&priv->connector);
+ drm_encoder_cleanup(&priv->encoder);
+ tda998x_destroy(&priv->base);
+}
+
+static const struct component_ops tda998x_ops = {
+ .bind = tda998x_bind,
+ .unbind = tda998x_unbind,
+};
+
+static int
+tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ return component_add(&client->dev, &tda998x_ops);
+}
+
+static int tda998x_remove(struct i2c_client *client)
+{
+ component_del(&client->dev, &tda998x_ops);
+ return 0;
+}
+
#ifdef CONFIG_OF
static const struct of_device_id tda998x_dt_ids[] = {
{ .compatible = "nxp,tda998x", },
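The tda998x changes above convert the driver from a pure drm_encoder_slave into a component: the I2C probe now only registers with the component framework, and the actual encoder/connector setup moves into the bind callback that runs when the master DRM driver assembles the aggregate device. A minimal sketch of that pattern with hypothetical foo_* names (only component_ops, component_add(), component_del() and the i2c_driver plumbing are real kernel API; the rest is an assumption for illustration):

#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/module.h>

static int foo_bind(struct device *dev, struct device *master, void *data)
{
        /* 'data' is whatever the master passes to component_bind_all(),
         * typically the struct drm_device; create the encoder and
         * connector against it here. */
        return 0;
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
        /* tear down what foo_bind() created */
}

static const struct component_ops foo_component_ops = {
        .bind   = foo_bind,
        .unbind = foo_unbind,
};

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
        /* defer all DRM work until the master binds the aggregate device */
        return component_add(&client->dev, &foo_component_ops);
}

static int foo_remove(struct i2c_client *client)
{
        component_del(&client->dev, &foo_component_ops);
        return 0;
}

static struct i2c_driver foo_driver = {
        .probe  = foo_probe,
        .remove = foo_remove,
        .driver = {
                .name = "foo",
        },
};
module_i2c_driver(foo_driver);

MODULE_LICENSE("GPL");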
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index e88bac1d781f..bae897de9468 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -393,15 +393,14 @@ static int i810_dma_initialize(struct drm_device *dev,
/* Program Hardware Status Page */
dev_priv->hw_status_page =
- pci_alloc_consistent(dev->pdev, PAGE_SIZE,
- &dev_priv->dma_status_page);
+ pci_zalloc_consistent(dev->pdev, PAGE_SIZE,
+ &dev_priv->dma_status_page);
if (!dev_priv->hw_status_page) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
I810_WRITE(0x02080, dev_priv->dma_status_page);
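The i810 change above is a straight substitution: pci_zalloc_consistent() returns coherent DMA memory that is already zeroed, so the follow-up memset() can be dropped. A hedged sketch of the allocation side (the example_* name is made up):

#include <linux/pci.h>

static int example_alloc_status_page(struct pci_dev *pdev,
                                     void **vaddr, dma_addr_t *bus_addr)
{
        /* zeroed, coherent DMA allocation; no explicit memset() needed */
        *vaddr = pci_zalloc_consistent(pdev, PAGE_SIZE, bus_addr);
        return *vaddr ? 0 : -ENOMEM;
}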
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 437e1824d0bf..4e39ab34eb1c 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -69,15 +69,3 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
option changes the default for that module option.
If in doubt, say "N".
-
-config DRM_I915_UMS
- bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
- depends on DRM_I915 && BROKEN
- default n
- help
- Choose this option if you still need userspace modesetting.
-
- Userspace modesetting is deprecated for quite some time now, so
- enable this only if you have ancient versions of the DDX drivers.
-
- If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cad1683d8bb5..91bd167e1cb7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += dvo_ch7017.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
+ intel_dp_mst.o \
intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 9d7954366bd2..dea99d92fb4a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = {
GEN7_SO_WRITE_OFFSET(1),
GEN7_SO_WRITE_OFFSET(2),
GEN7_SO_WRITE_OFFSET(3),
+ GEN7_L3SQCREG1,
+ GEN7_L3CNTLREG2,
+ GEN7_L3CNTLREG3,
};
static const u32 gen7_blt_regs[] = {
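The command-parser hunk above only grows the gen7 render register whitelist (the three L3 control registers). The underlying idea is that a batch may touch a register only if its MMIO offset appears in the per-engine table. The standalone C sketch below illustrates that lookup with invented offsets and a sorted-table bsearch; the in-kernel parser keeps its own table layout and search code, so treat this purely as an illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* hypothetical allowed offsets, kept sorted for bsearch() */
static const uint32_t example_render_regs[] = {
        0x2248, 0x22c0, 0x7300,
};

static int cmp_u32(const void *a, const void *b)
{
        uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;

        return (x > y) - (x < y);
}

static bool reg_allowed(uint32_t offset)
{
        return bsearch(&offset, example_render_regs,
                       sizeof(example_render_regs) / sizeof(example_render_regs[0]),
                       sizeof(uint32_t), cmp_u32) != NULL;
}

int main(void)
{
        printf("0x7300 allowed: %d\n", reg_allowed(0x7300));
        printf("0x1234 allowed: %d\n", reg_allowed(0x1234));
        return 0;
}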
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b8c689202c40..9e737b771c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -170,11 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
+ if (obj->frontbuffer_bits)
+ seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
- seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+ seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, ' ');
}
@@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
unsigned long flags;
struct intel_crtc *crtc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
@@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -985,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
i915_next_seqno_get, i915_next_seqno_set,
"0x%llx\n");
-static int i915_rstdby_delays(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 crstanddelay;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- crstanddelay = I915_READ16(CRSTANDVID);
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
-
- return 0;
-}
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1029,7 +1015,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
+ IS_BROADWELL(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1048,7 +1035,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
reqf >>= 24;
else
reqf >>= 25;
@@ -1065,7 +1052,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1121,20 +1108,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max overclocked frequency: %dMHz\n",
dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
} else if (IS_VALLEYVIEW(dev)) {
- u32 freq_sts, val;
+ u32 freq_sts;
mutex_lock(&dev_priv->rps.hw_lock);
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
- val = valleyview_rps_max_freq(dev_priv);
seq_printf(m, "max GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, val));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
- val = valleyview_rps_min_freq(dev_priv);
seq_printf(m, "min GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, val));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+
+ seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
seq_printf(m, "current GPU freq: %d MHz\n",
vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
@@ -1148,61 +1136,6 @@ out:
return ret;
}
-static int i915_delayfreq_table(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 delayfreq;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- for (i = 0; i < 16; i++) {
- delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
- seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
- (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
- }
-
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static inline int MAP_TO_MV(int map)
-{
- return 1250 - (map * 25);
-}
-
-static int i915_inttoext_table(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 inttoext;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- for (i = 1; i <= 32; i++) {
- inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
- seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
- }
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
@@ -1513,10 +1446,17 @@ static int i915_ips_status(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "enabled\n");
- else
- seq_puts(m, "disabled\n");
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ yesno(i915.enable_ips));
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (I915_READ(IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
intel_runtime_pm_put(dev_priv);
@@ -1620,26 +1560,6 @@ out:
return ret;
}
-static int i915_gfxec(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int i915_opregion(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1677,9 +1597,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (ret)
- return ret;
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
@@ -1692,7 +1609,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
- mutex_unlock(&dev->mode_config.mutex);
#endif
mutex_lock(&dev->mode_config.fb_lock);
@@ -1723,7 +1639,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct intel_context *ctx;
int ret, i;
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1740,7 +1656,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- if (ctx->obj == NULL)
+ if (ctx->legacy_hw_ctx.rcs_state == NULL)
continue;
seq_puts(m, "HW context ");
@@ -1749,11 +1665,11 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ring->default_context == ctx)
seq_printf(m, "(default context %s) ", ring->name);
- describe_obj(m, ctx->obj);
+ describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
seq_putc(m, '\n');
}
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1863,7 +1779,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
if (i915_gem_context_is_default(ctx))
seq_puts(m, " default context:\n");
else
- seq_printf(m, " context %d:\n", ctx->id);
+ seq_printf(m, " context %d:\n", ctx->user_handle);
ppgtt->debug_dump(ppgtt, m);
return 0;
@@ -1976,17 +1892,25 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->psr.lock);
seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+ seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+ seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
+ seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
+ dev_priv->psr.busy_frontbuffer_bits);
+ seq_printf(m, "Re-enable work scheduled: %s\n",
+ yesno(work_busy(&dev_priv->psr.work.work)));
enabled = HAS_PSR(dev) &&
I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- seq_printf(m, "Enabled: %s\n", yesno(enabled));
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
if (HAS_PSR(dev))
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -2072,7 +1996,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
- yesno(dev_priv->pm.irqs_disabled));
+ yesno(!intel_irqs_enabled(dev_priv)));
return 0;
}
@@ -2126,6 +2050,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "VGA";
case POWER_DOMAIN_AUDIO:
return "AUDIO";
+ case POWER_DOMAIN_PLLS:
+ return "PLLS";
case POWER_DOMAIN_INIT:
return "INIT";
default:
@@ -2223,9 +2149,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
- seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
- crtc->primary->fb->base.id, crtc->x, crtc->y,
- crtc->primary->fb->width, crtc->primary->fb->height);
+ if (crtc->primary->fb)
+ seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+ crtc->primary->fb->base.id, crtc->x, crtc->y,
+ crtc->primary->fb->width, crtc->primary->fb->height);
+ else
+ seq_puts(m, "\tprimary plane disabled\n");
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
intel_encoder_info(m, intel_crtc, intel_encoder);
}
@@ -2287,13 +2216,15 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "\tCEA rev: %d\n",
connector->display_info.cea_rev);
}
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP)
- intel_dp_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
- intel_hdmi_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
- intel_lvds_info(m, intel_connector);
+ if (intel_encoder) {
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ intel_dp_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+ intel_hdmi_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+ }
seq_printf(m, "\tmodes:\n");
list_for_each_entry(mode, &connector->modes, head)
@@ -2347,17 +2278,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
bool active;
int x, y;
- seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+ seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active));
+ yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
if (crtc->active) {
intel_crtc_info(m, crtc);
active = cursor_position(dev, crtc->pipe, &x, &y);
- seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+ seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
- x, y, crtc->cursor_addr,
- yesno(active));
+ x, y, crtc->cursor_width, crtc->cursor_height,
+ crtc->cursor_addr, yesno(active));
}
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -2377,12 +2308,132 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ int i, j, ret;
+
+ if (!i915_semaphore_is_enabled(dev)) {
+ seq_puts(m, "Semaphores are disabled\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ if (IS_BROADWELL(dev)) {
+ struct page *page;
+ uint64_t *seqno;
+
+ page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
+ seqno = (uint64_t *)kmap_atomic(page);
+ for_each_ring(ring, dev_priv, i) {
+ uint64_t offset;
+
+ seq_printf(m, "%s\n", ring->name);
+
+ seq_puts(m, " Last signal:");
+ for (j = 0; j < num_rings; j++) {
+ offset = i * I915_NUM_RINGS + j;
+ seq_printf(m, "0x%08llx (0x%02llx) ",
+ seqno[offset], offset * 8);
+ }
+ seq_putc(m, '\n');
+
+ seq_puts(m, " Last wait: ");
+ for (j = 0; j < num_rings; j++) {
+ offset = i + (j * I915_NUM_RINGS);
+ seq_printf(m, "0x%08llx (0x%02llx) ",
+ seqno[offset], offset * 8);
+ }
+ seq_putc(m, '\n');
+
+ }
+ kunmap_atomic(seqno);
+ } else {
+ seq_puts(m, " Last signal:");
+ for_each_ring(ring, dev_priv, i)
+ for (j = 0; j < num_rings; j++)
+ seq_printf(m, "0x%08x\n",
+ I915_READ(ring->semaphore.mbox.signal[j]));
+ seq_putc(m, '\n');
+ }
+
+ seq_puts(m, "\nSync seqno:\n");
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < num_rings; j++) {
+ seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
+ }
+ seq_putc(m, '\n');
+ }
+ seq_putc(m, '\n');
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ drm_modeset_lock_all(dev);
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+ seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
+ pll->active, yesno(pll->on));
+ seq_printf(m, " tracked hardware state:\n");
+ seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
enum pipe pipe;
};
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *intel_dig_port;
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = to_intel_encoder(encoder);
+ if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+ continue;
+ intel_dig_port = enc_to_dig_port(encoder);
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ }
+ drm_modeset_unlock_all(dev);
+ return 0;
+}
+
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
@@ -2849,7 +2900,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+ drm_modeset_lock_all(dev);
+ /*
+ * If we use the eDP transcoder we need to make sure that we don't
+ * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+ * relevant on hsw with pipe A when using the always-on power well
+ * routing.
+ */
+ if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
+ !crtc->config.pch_pfit.enabled) {
+ crtc->config.pch_pfit.force_thru = true;
+
+ intel_display_power_get(dev_priv,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+ drm_modeset_lock_all(dev);
+ /*
+ * If we use the eDP transcoder we need to make sure that we don't
+ * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+ * relevant on hsw with pipe A when using the always-on power well
+ * routing.
+ */
+ if (crtc->config.pch_pfit.force_thru) {
+ crtc->config.pch_pfit.force_thru = false;
+
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+
+ intel_display_power_put(dev_priv,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
uint32_t *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -2863,6 +2967,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
+ if (IS_HASWELL(dev) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev);
+
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -2895,11 +3002,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
else if (INTEL_INFO(dev)->gen < 5)
ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
else if (IS_VALLEYVIEW(dev))
- ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
+ ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
else if (IS_GEN5(dev) || IS_GEN6(dev))
ret = ilk_pipe_crc_ctl_reg(&source, &val);
else
- ret = ivb_pipe_crc_ctl_reg(&source, &val);
+ ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
if (ret != 0)
return ret;
@@ -2929,11 +3036,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
- intel_wait_for_vblank(dev, pipe);
+ drm_modeset_lock(&crtc->base.mutex, NULL);
+ if (crtc->active)
+ intel_wait_for_vblank(dev, pipe);
+ drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
entries = pipe_crc->entries;
@@ -2946,6 +3058,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
g4x_undo_pipe_scramble_reset(dev, pipe);
else if (IS_VALLEYVIEW(dev))
vlv_undo_pipe_scramble_reset(dev, pipe);
+ else if (IS_HASWELL(dev) && pipe == PIPE_A)
+ hsw_undo_trans_edp_pipe_A_crc_wa(dev);
}
return 0;
@@ -3177,7 +3291,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, pri_wm_latency_show, dev);
@@ -3187,7 +3301,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, spr_wm_latency_show, dev);
@@ -3197,7 +3311,7 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, cur_wm_latency_show, dev);
@@ -3506,7 +3620,7 @@ i915_max_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3532,7 +3646,7 @@ i915_max_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3549,8 +3663,8 @@ i915_max_freq_set(void *data, u64 val)
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
@@ -3587,7 +3701,7 @@ i915_min_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3613,7 +3727,7 @@ i915_min_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3630,8 +3744,8 @@ i915_min_freq_set(void *data, u64 val)
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
@@ -3799,14 +3913,10 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
- {"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_delayfreq_table", i915_delayfreq_table, 0},
- {"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -3823,6 +3933,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_pc8_status", i915_pc8_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_semaphore_status", i915_semaphore_status, 0},
+ {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+ {"i915_dp_mst_info", i915_dp_mst_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
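Each of the new debugfs files added above (i915_semaphore_status, i915_shared_dplls_info, i915_dp_mst_info) follows the same shape: a seq_file show callback plus a drm_info_list entry that drm_debugfs_create_files() turns into a file under the DRM minor's debugfs directory. A minimal sketch with a hypothetical example_info entry (not part of the patch):

static int example_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;

        seq_printf(m, "device %s\n", dev->driver->name);
        return 0;
}

static const struct drm_info_list example_debugfs_list[] = {
        /* name, show callback, required driver features, private data */
        {"example_info", example_info, 0},
};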
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d44344140627..2e7f03ad5ee2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev)
I915_WRITE(HWS_PGA, 0x1ffff000);
}
-void i915_kernel_lost_context(struct drm_device * dev)
+void i915_kernel_lost_context(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
@@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
-static int i915_dma_cleanup(struct drm_device * dev)
+static int i915_dma_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
@@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
return 0;
}
-static int i915_dma_resume(struct drm_device * dev)
+static int i915_dma_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = LP_RING(dev_priv);
@@ -359,7 +359,7 @@ static int validate_cmd(int cmd)
return 0;
}
-static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
@@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
for (i = 0; i < dwords;) {
int sz = validate_cmd(buffer[i]);
+
if (sz == 0 || i + sz > dwords)
return -EINVAL;
i += sz;
@@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
}
}
-static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+static int i915_dispatch_cmdbuffer(struct drm_device *dev,
drm_i915_cmdbuffer_t *cmd,
struct drm_clip_rect *cliprects,
void *cmdbuf)
@@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
return 0;
}
-static int i915_dispatch_batchbuffer(struct drm_device * dev,
- drm_i915_batchbuffer_t * batch,
+static int i915_dispatch_batchbuffer(struct drm_device *dev,
+ drm_i915_batchbuffer_t *batch,
struct drm_clip_rect *cliprects)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
return 0;
}
-static int i915_dispatch_flip(struct drm_device * dev)
+static int i915_dispatch_flip(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
@@ -755,7 +756,7 @@ fail_batch_free:
return ret;
}
-static int i915_emit_irq(struct drm_device * dev)
+static int i915_emit_irq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->dri1.counter;
}
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device *dev, int irq_nr)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
{
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
if (state == VGA_SWITCHEROO_ON) {
pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1338,6 +1340,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;
+ dev_priv->pm._irqs_disabled = false;
+
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
@@ -1375,9 +1379,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
*/
intel_fbdev_initial_config(dev);
- /* Only enable hotplug handling once the fbdev is fully set up. */
- dev_priv->enable_hotplug_processing = true;
-
drm_kms_helper_poll_init(dev);
return 0;
@@ -1425,15 +1426,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
}
#if IS_ENABLED(CONFIG_FB)
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
bool primary;
+ int ret;
ap = alloc_apertures(1);
if (!ap)
- return;
+ return -ENOMEM;
ap->ranges[0].base = dev_priv->gtt.mappable_base;
ap->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -1441,13 +1443,16 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
- remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
kfree(ap);
+
+ return ret;
}
#else
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
+ return 0;
}
#endif
@@ -1492,10 +1497,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
info->gen,
dev_priv->dev->pdev->device,
+ dev_priv->dev->pdev->revision,
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
@@ -1594,7 +1600,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (dev_priv == NULL)
return -ENOMEM;
- dev->dev_private = (void *)dev_priv;
+ dev->dev_private = dev_priv;
dev_priv->dev = dev;
/* copy initial configuration to dev_priv->info */
@@ -1606,6 +1612,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
@@ -1664,7 +1671,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gtt;
}
- i915_kick_out_firmware_fb(dev_priv);
+ ret = i915_kick_out_firmware_fb(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ goto out_gtt;
+ }
}
pci_set_master(dev->pdev);
@@ -1717,6 +1728,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
+ dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->dp_wq == NULL) {
+ DRM_ERROR("Failed to create our dp workqueue.\n");
+ ret = -ENOMEM;
+ goto out_freewq;
+ }
+
intel_irq_init(dev);
intel_uncore_sanitize(dev);
@@ -1792,6 +1810,8 @@ out_gem_unload:
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
+ destroy_workqueue(dev_priv->dp_wq);
+out_freewq:
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1892,6 +1912,7 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1933,7 +1954,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
* and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
-void i915_driver_lastclose(struct drm_device * dev)
+void i915_driver_lastclose(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1954,11 +1975,11 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_dma_cleanup(dev);
}
-void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
mutex_lock(&dev->struct_mutex);
- i915_gem_context_close(dev, file_priv);
- i915_gem_release(dev, file_priv);
+ i915_gem_context_close(dev, file);
+ i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
}
@@ -2031,7 +2052,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
* manage the gtt, we need to claim that all intel devices are agp. For
* otherwise the drm core refuses to initialize the agp support code.
*/
-int i915_driver_device_is_agp(struct drm_device * dev)
+int i915_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
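The i915_driver_load() hunk above adds a second workqueue (dp_wq) and a matching out_freewq unwind label. The pattern is the usual kernel goto-based error unwinding: every acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A sketch under hypothetical example_* names:

struct example_priv {
        struct workqueue_struct *wq;
        struct workqueue_struct *dp_wq;
};

static int example_irq_init(struct example_priv *priv)
{
        return 0;       /* stand-in for real interrupt setup */
}

static int example_load(struct example_priv *priv)
{
        int ret;

        priv->wq = alloc_ordered_workqueue("example", 0);
        if (!priv->wq)
                return -ENOMEM;

        priv->dp_wq = alloc_ordered_workqueue("example-dp", 0);
        if (!priv->dp_wq) {
                ret = -ENOMEM;
                goto out_free_wq;
        }

        ret = example_irq_init(priv);
        if (ret)
                goto out_free_dp_wq;

        return 0;

out_free_dp_wq:
        destroy_workqueue(priv->dp_wq);
out_free_wq:
        destroy_workqueue(priv->wq);
        return ret;
}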
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 651e65e051c0..ec96f9a9724c 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -28,6 +28,7 @@
*/
#include <linux/device.h>
+#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -46,8 +47,6 @@ static struct drm_driver driver;
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
- .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
- .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
#define GEN_CHV_PIPEOFFSETS \
@@ -55,10 +54,6 @@ static struct drm_driver driver;
CHV_PIPE_C_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
CHV_TRANSCODER_C_OFFSET, }, \
- .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
- CHV_DPLL_C_OFFSET }, \
- .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
- CHV_DPLL_C_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
CHV_PALETTE_C_OFFSET }
@@ -308,6 +303,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -319,6 +315,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -330,6 +327,7 @@ static const struct intel_device_info intel_broadwell_gt3d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -341,6 +339,7 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -499,8 +498,7 @@ static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
-
- intel_runtime_pm_get(dev_priv);
+ pci_power_t opregion_target_state;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
@@ -526,21 +524,23 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
- drm_irq_uninstall(dev);
- dev_priv->enable_hotplug_processing = false;
-
- intel_disable_gt_powersave(dev);
-
/*
* Disable CRTCs directly since we want to preserve sw state
- * for _thaw.
+ * for _thaw. Also, power gate the CRTC power wells.
*/
drm_modeset_lock_all(dev);
- for_each_crtc(dev, crtc) {
- dev_priv->display.crtc_disable(crtc);
- }
+ for_each_crtc(dev, crtc)
+ intel_crtc_control(crtc, false);
drm_modeset_unlock_all(dev);
+ intel_dp_mst_suspend(dev);
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ intel_runtime_pm_disable_interrupts(dev);
+
+ intel_suspend_gt_powersave(dev);
+
intel_modeset_suspend_hw(dev);
}
@@ -548,8 +548,15 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
+ opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ opregion_target_state = PCI_D1;
+#endif
+ intel_opregion_notify_adapter(dev, opregion_target_state);
+
+ intel_uncore_forcewake_reset(dev, false);
intel_opregion_fini(dev);
- intel_uncore_fini(dev);
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
@@ -557,6 +564,8 @@ static int i915_drm_freeze(struct drm_device *dev)
dev_priv->suspend_count++;
+ intel_display_set_init_power(dev_priv, false);
+
return 0;
}
@@ -606,7 +615,10 @@ static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_uncore_early_sanitize(dev);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hsw_disable_pc8(dev_priv);
+
+ intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
@@ -639,11 +651,19 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
}
mutex_unlock(&dev->struct_mutex);
- /* We need working interrupts for modeset enabling ... */
- drm_irq_install(dev, dev->pdev->irq);
+ intel_runtime_pm_restore_interrupts(dev);
intel_modeset_init_hw(dev);
+ {
+ unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
@@ -655,7 +675,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
* notifications.
* */
intel_hpd_init(dev);
- dev_priv->enable_hotplug_processing = true;
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
}
@@ -678,7 +697,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_opregion_notify_adapter(dev, PCI_D0);
+
return 0;
}
@@ -887,6 +907,7 @@ static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
/*
* We have a suspend ordering issue with the snd-hda driver also
@@ -900,6 +921,9 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
+ hsw_enable_pc8(dev_priv);
+
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1f7700897dfc..4412f6a4383b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080730"
+#define DRIVER_DATE "20140725"
enum pipe {
INVALID_PIPE = -1,
@@ -129,6 +129,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_OTHER,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_PLLS,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -178,14 +179,20 @@ enum hpd_pin {
list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
if ((intel_connector)->base.encoder == (__encoder))
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ if ((1 << (domain)) & (mask))
+
struct drm_i915_private;
struct i915_mmu_object;
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
- DPLL_ID_PCH_PLL_A,
- DPLL_ID_PCH_PLL_B,
+ DPLL_ID_PCH_PLL_A = 0,
+ DPLL_ID_PCH_PLL_B = 1,
+ DPLL_ID_WRPLL1 = 0,
+ DPLL_ID_WRPLL2 = 1,
};
#define I915_NUM_PLLS 2
@@ -194,6 +201,7 @@ struct intel_dpll_hw_state {
uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
+ uint32_t wrpll;
};
struct intel_shared_dpll {
@@ -204,6 +212,8 @@ struct intel_shared_dpll {
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
+ /* The mode_set hook is optional and should be used together with the
+ * intel_prepare_shared_dpll function. */
void (*mode_set)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*enable)(struct drm_i915_private *dev_priv,
@@ -228,12 +238,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n);
-struct intel_ddi_plls {
- int spll_refcount;
- int wrpll1_refcount;
- int wrpll2_refcount;
-};
-
/* Interface history:
*
* 1.1: Original.
@@ -310,6 +314,7 @@ struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
+ u32 gtier[4];
u32 ccid;
u32 derrmr;
u32 forcewake;
@@ -324,6 +329,7 @@ struct drm_i915_error_state {
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
+ struct drm_i915_error_object *semaphore_obj;
struct drm_i915_error_ring {
bool valid;
@@ -435,8 +441,8 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enable, bool scaled);
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@@ -552,8 +558,6 @@ struct intel_device_info {
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
- int dpll_offsets[I915_MAX_PIPES];
- int dpll_md_offsets[I915_MAX_PIPES];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
};
@@ -586,28 +590,48 @@ struct i915_ctx_hang_stats {
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_ID 0
+#define DEFAULT_CONTEXT_HANDLE 0
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @file_priv: filp associated with this context (NULL for global default
+ * context).
+ * @hang_stats: information about the role of this context in possible GPU
+ * hangs.
+ * @vm: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ * initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of their
+ * internal state.
+ */
struct intel_context {
struct kref ref;
- int id;
- bool is_initialized;
+ int user_handle;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_engine_cs *last_ring;
- struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
+ struct {
+ struct drm_i915_gem_object *rcs_state;
+ bool initialized;
+ } legacy_hw_ctx;
+
struct list_head link;
};
struct i915_fbc {
unsigned long size;
+ unsigned threshold;
unsigned int fb_id;
enum plane plane;
int y;
- struct drm_mm_node *compressed_fb;
+ struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
struct intel_fbc_work {
@@ -635,9 +659,15 @@ struct i915_drrs {
struct intel_connector *connector;
};
+struct intel_dp;
struct i915_psr {
+ struct mutex lock;
bool sink_support;
bool source_ok;
+ struct intel_dp *enabled;
+ bool active;
+ struct delayed_work work;
+ unsigned busy_frontbuffer_bits;
};
enum intel_pch {
@@ -880,6 +910,12 @@ struct vlv_s0ix_state {
u32 clock_gate_dis2;
};
+struct intel_rps_ei {
+ u32 cz_clock;
+ u32 render_c0;
+ u32 media_c0;
+};
+
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
@@ -903,6 +939,9 @@ struct intel_gen6_power_mgmt {
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/frequency */
u8 rp0_freq; /* Non-overclocked max frequency. */
+ u32 cz_freq;
+
+ u32 ei_interrupt_count;
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -910,6 +949,9 @@ struct intel_gen6_power_mgmt {
bool enabled;
struct delayed_work delayed_resume_work;
+ /* manual wa residency calculations */
+ struct intel_rps_ei up_ei, down_ei;
+
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
@@ -1230,6 +1272,7 @@ struct intel_vbt_data {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
+ u8 min_brightness; /* min_brightness/255 of max */
} backlight;
/* MIPI DSI */
@@ -1299,7 +1342,7 @@ struct ilk_wm_values {
*/
struct i915_runtime_pm {
bool suspended;
- bool irqs_disabled;
+ bool _irqs_disabled;
};
enum intel_pipe_crc_source {
@@ -1332,6 +1375,17 @@ struct intel_pipe_crc {
wait_queue_head_t wq;
};
+struct i915_frontbuffer_tracking {
+ struct mutex lock;
+
+ /*
+ * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+ * scheduled flips.
+ */
+ unsigned busy_bits;
+ unsigned flip_bits;
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1363,6 +1417,7 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_engine_cs ring[I915_NUM_RINGS];
+ struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
@@ -1371,6 +1426,9 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ /* protects the mmio flip data */
+ spinlock_t mmio_flip_lock;
+
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1390,7 +1448,6 @@ struct drm_i915_private {
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct work_struct hotplug_work;
- bool enable_hotplug_processing;
struct {
unsigned long hpd_last_jiffies;
int hpd_cnt;
@@ -1467,7 +1524,6 @@ struct drm_i915_private {
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- struct intel_ddi_plls ddi_plls;
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
/* Reclocking support */
@@ -1475,6 +1531,9 @@ struct drm_i915_private {
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
+
+ struct i915_frontbuffer_tracking fb_tracking;
+
u16 orig_clock;
bool mchbar_need_disable;
@@ -1541,6 +1600,20 @@ struct drm_i915_private {
struct i915_runtime_pm pm;
+ struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
+ u32 long_hpd_port_mask;
+ u32 short_hpd_port_mask;
+ struct work_struct dig_port_work;
+
+ /*
+ * If we get an HPD irq from DP and an HPD irq from non-DP, the
+ * non-DP HPD work could block the workqueue while waiting for a
+ * mode config mutex that userspace may already hold, while
+ * userspace in turn waits on the DP workqueue to run, which is
+ * blocked behind the non-DP one.
+ */
+ struct workqueue_struct *dp_wq;
+
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
@@ -1592,6 +1665,28 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *);
};
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-vise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+#define INTEL_FRONTBUFFER_BITS \
+ (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+ (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER_CURSOR(pipe) \
+ (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe) \
+ (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+ (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+
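
To make the bit layout of the INTEL_FRONTBUFFER_* macros above concrete, a standalone sketch that prints the per-pipe masks (the values mirror the macros; the pipe count of three is an assumption for the example):

#include <stdio.h>

#define FB_BITS_PER_PIPE 4
#define FB_PRIMARY(pipe)  (1u << (FB_BITS_PER_PIPE * (pipe)))
#define FB_CURSOR(pipe)   (1u << (1 + FB_BITS_PER_PIPE * (pipe)))
#define FB_SPRITE(pipe)   (1u << (2 + FB_BITS_PER_PIPE * (pipe)))
#define FB_OVERLAY(pipe)  (1u << (3 + FB_BITS_PER_PIPE * (pipe)))
#define FB_ALL_MASK(pipe) (0xfu << (FB_BITS_PER_PIPE * (pipe)))

int main(void)
{
	int pipe;

	/* Each pipe owns a nibble: primary, cursor, sprite, overlay. */
	for (pipe = 0; pipe < 3; pipe++)
		printf("pipe %c: primary=0x%03x cursor=0x%03x sprite=0x%03x overlay=0x%03x all=0x%03x\n",
		       'A' + pipe, FB_PRIMARY(pipe), FB_CURSOR(pipe),
		       FB_SPRITE(pipe), FB_OVERLAY(pipe), FB_ALL_MASK(pipe));
	return 0;
}
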
struct drm_i915_gem_object {
struct drm_gem_object base;
@@ -1662,6 +1757,12 @@ struct drm_i915_gem_object {
unsigned int pin_display:1;
/*
+ * Is the object to be mapped as read-only to the GPU?
+ * Only honoured if the hardware has the relevant PTE bit.
+ */
+ unsigned long gt_ro:1;
+
+ /*
* Is the GPU currently using a fence to access this buffer,
*/
unsigned int pending_fenced_gpu_access:1;
@@ -1673,6 +1774,8 @@ struct drm_i915_gem_object {
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
+ unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+
struct sg_table *pages;
int pages_pin_count;
@@ -1719,6 +1822,10 @@ struct drm_i915_gem_object {
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits);
+
/**
* Request queue structure.
*
@@ -1940,10 +2047,8 @@ struct drm_i915_cmd_table {
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
- (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
- && !IS_GEN8(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
@@ -1998,6 +2103,8 @@ struct drm_i915_cmd_table {
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
@@ -2040,6 +2147,8 @@ struct i915_params {
bool reset;
bool disable_display;
bool disable_vtd_wa;
+ int use_mmio_flip;
+ bool mmio_debug;
};
extern struct i915_params i915 __read_mostly;
@@ -2048,12 +2157,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
+ struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
- struct drm_file *file_priv);
+ struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2084,10 +2193,12 @@ extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
+extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2235,6 +2346,8 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
@@ -2404,7 +2517,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx)
static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
- return c->id == DEFAULT_CONTEXT_ID;
+ return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2435,7 +2548,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
@@ -2445,7 +2558,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 stolen_offset,
u32 gtt_offset,
u32 size);
-void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -2593,8 +2705,8 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
-extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
-extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+ bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
@@ -2605,6 +2717,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@ -2700,10 +2814,10 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
- if (HAS_PCH_SPLIT(dev))
- return CPU_VGACNTRL;
- else if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL;
+ else if (INTEL_INFO(dev)->gen >= 5)
+ return CPU_VGACNTRL;
else
return VGACNTRL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f247d922e44a..ba7f5c6bb50d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* Compare seqno against outstanding lazy request. Emit a request if they are
* equal.
*/
-static int
+int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
@@ -1161,14 +1161,14 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
s64 before, now;
int ret;
- WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
+ WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
- if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+ if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
if (file_priv)
mod_delayed_work(dev_priv->wq,
@@ -1560,14 +1560,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unpin;
- obj->fault_mappable = true;
-
+ /* Finally, remap it using the new GTT offset */
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
pfn >>= PAGE_SHIFT;
- pfn += page_offset;
- /* Finally, remap it using the new GTT offset */
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ if (!obj->fault_mappable) {
+ unsigned long size = min_t(unsigned long,
+ vma->vm_end - vma->vm_start,
+ obj->base.size);
+ int i;
+
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vma->vm_start + i * PAGE_SIZE,
+ pfn + i);
+ if (ret)
+ break;
+ }
+
+ obj->fault_mappable = true;
+ } else
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vmf->virtual_address,
+ pfn + page_offset);
unpin:
i915_gem_object_ggtt_unpin(obj);
unlock:
@@ -2051,16 +2066,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*/
- gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
- gfp |= __GFP_IO | __GFP_WAIT;
-
i915_gem_shrink_all(dev_priv);
- page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
goto err_pages;
-
- gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
- gfp &= ~(__GFP_IO | __GFP_WAIT);
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -2209,6 +2218,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
list_move_tail(&vma->mm_list, &vm->inactive_list);
}
+ intel_fb_obj_flush(obj, true);
+
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2318,7 +2329,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
u32 request_ring_position, request_start;
int ret;
- request_start = intel_ring_get_tail(ring);
+ request_start = intel_ring_get_tail(ring->buffer);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2339,7 +2350,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ring);
+ request_ring_position = intel_ring_get_tail(ring->buffer);
ret = ring->add_request(ring);
if (ret)
@@ -2822,6 +2833,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
idx = intel_ring_sync_index(from, to);
seqno = obj->last_read_seqno;
+ /* Optimization: Avoid semaphore sync when we are sure we already
+ * waited for an object with a higher seqno */
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
@@ -2905,8 +2918,6 @@ int i915_vma_unbind(struct i915_vma *vma)
vma->unbind_vma(vma);
- i915_gem_gtt_finish_object(obj);
-
list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
@@ -2917,8 +2928,10 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
- if (list_empty(&obj->vma_list))
+ if (list_empty(&obj->vma_list)) {
+ i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+ }
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -3530,6 +3543,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
+ intel_fb_obj_flush(obj, false);
+
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
@@ -3551,6 +3566,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
+ intel_fb_obj_flush(obj, false);
+
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
@@ -3604,6 +3621,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->dirty = 1;
}
+ if (write)
+ intel_fb_obj_invalidate(obj, NULL);
+
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -3940,6 +3960,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
+ if (write)
+ intel_fb_obj_invalidate(obj, NULL);
+
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -4428,13 +4451,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
+ WARN_ON(obj->frontbuffer_bits);
+
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
- i915_gem_object_release_stolen(obj);
BUG_ON(obj->pages);
@@ -4521,7 +4545,7 @@ i915_gem_suspend(struct drm_device *dev)
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+ flush_delayed_work(&dev_priv->mm.idle_work);
return 0;
@@ -4912,6 +4936,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
register_oom_notifier(&dev_priv->mm.oom_notifier);
+
+ mutex_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
@@ -4973,6 +4999,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return ret;
}
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits)
+{
+ if (old) {
+ WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
+ WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
+ old->frontbuffer_bits &= ~frontbuffer_bits;
+ }
+
+ if (new) {
+ WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
+ WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
+ new->frontbuffer_bits |= frontbuffer_bits;
+ }
+}
+
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
@@ -5055,12 +5098,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
- BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
return vma->node.start;
}
+ WARN(1, "%s vma for this object not found.\n",
+ i915_is_ggtt(vm) ? "global" : "ppgtt");
return -1;
}
@@ -5141,8 +5185,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
bool was_interruptible;
bool unlock;
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
schedule_timeout_killable(1);
+ if (fatal_signal_pending(current))
+ return NOTIFY_DONE;
+ }
if (timeout == 0) {
pr_err("Unable to purge GPU memory due lock contention.\n");
return NOTIFY_DONE;
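
The new i915_gem_track_fb() above simply moves frontbuffer bits from the old object to the new one; a minimal user-space model of that handover (the struct is a stand-in, locking and the WARNs are reduced to asserts):

#include <assert.h>
#include <stdio.h>

struct fake_bo {
	unsigned frontbuffer_bits;	/* stand-in for the gem object field */
};

static void track_fb(struct fake_bo *old, struct fake_bo *new,
		     unsigned frontbuffer_bits)
{
	/* The old object must currently own the bits being moved. */
	if (old) {
		assert(old->frontbuffer_bits & frontbuffer_bits);
		old->frontbuffer_bits &= ~frontbuffer_bits;
	}
	/* The new object must not own them yet. */
	if (new) {
		assert(!(new->frontbuffer_bits & frontbuffer_bits));
		new->frontbuffer_bits |= frontbuffer_bits;
	}
}

int main(void)
{
	struct fake_bo a = { .frontbuffer_bits = 0x1 }, b = { 0 };

	track_fb(&a, &b, 0x1);	/* e.g. primary plane of pipe A changes buffers */
	printf("a=0x%x b=0x%x\n", a.frontbuffer_bits, b.frontbuffer_bits);
	return 0;
}
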
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5ddf3bce9c3..3b99390e467a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -182,22 +182,50 @@ void i915_gem_context_free(struct kref *ctx_ref)
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
- if (ctx->obj) {
+ if (ctx->legacy_hw_ctx.rcs_state) {
/* We refcount even the aliasing PPGTT to keep the code symmetric */
- if (USES_PPGTT(ctx->obj->base.dev))
+ if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
ppgtt = ctx_to_ppgtt(ctx);
-
- /* XXX: Free up the object before tearing down the address space, in
- * case we're bound in the PPGTT */
- drm_gem_object_unreference(&ctx->obj->base);
}
if (ppgtt)
kref_put(&ppgtt->ref, ppgtt_release);
+ if (ctx->legacy_hw_ctx.rcs_state)
+ drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link);
kfree(ctx);
}
+static struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, size);
+ if (obj == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ */
+ if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+ /* Failure shouldn't ever happen this early */
+ if (WARN_ON(ret)) {
+ drm_gem_object_unreference(&obj->base);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return obj;
+}
+
static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
@@ -234,40 +262,26 @@ __create_hw_context(struct drm_device *dev,
list_add_tail(&ctx->link, &dev_priv->context_list);
if (dev_priv->hw_context_size) {
- ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
- if (ctx->obj == NULL) {
- ret = -ENOMEM;
+ struct drm_i915_gem_object *obj =
+ i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto err_out;
}
-
- /*
- * Try to make the context utilize L3 as well as LLC.
- *
- * On VLV we don't have L3 controls in the PTEs so we
- * shouldn't touch the cache level, especially as that
- * would make the object snooped which might have a
- * negative performance impact.
- */
- if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
- ret = i915_gem_object_set_cache_level(ctx->obj,
- I915_CACHE_L3_LLC);
- /* Failure shouldn't ever happen this early */
- if (WARN_ON(ret))
- goto err_out;
- }
+ ctx->legacy_hw_ctx.rcs_state = obj;
}
/* Default context will never have a file_priv */
if (file_priv != NULL) {
ret = idr_alloc(&file_priv->context_idr, ctx,
- DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+ DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
} else
- ret = DEFAULT_CONTEXT_ID;
+ ret = DEFAULT_CONTEXT_HANDLE;
ctx->file_priv = file_priv;
- ctx->id = ret;
+ ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
@@ -301,7 +315,7 @@ i915_gem_create_context(struct drm_device *dev,
if (IS_ERR(ctx))
return ctx;
- if (is_global_default_ctx && ctx->obj) {
+ if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -309,7 +323,7 @@ i915_gem_create_context(struct drm_device *dev,
* be available. To avoid this we always pin the default
* context.
*/
- ret = i915_gem_obj_ggtt_pin(ctx->obj,
+ ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +363,8 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
err_unpin:
- if (is_global_default_ctx && ctx->obj)
- i915_gem_object_ggtt_unpin(ctx->obj);
+ if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+ i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
@@ -366,23 +380,27 @@ void i915_gem_context_reset(struct drm_device *dev)
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *dctx = ring->default_context;
+ struct intel_context *lctx = ring->last_context;
/* Do a fake switch to the default context */
- if (ring->last_context == dctx)
+ if (lctx == dctx)
continue;
- if (!ring->last_context)
+ if (!lctx)
continue;
- if (dctx->obj && i == RCS) {
- WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+ if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+ WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0));
/* Fake a finish/inactive */
- dctx->obj->base.write_domain = 0;
- dctx->obj->active = 0;
+ dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+ dctx->legacy_hw_ctx.rcs_state->active = 0;
}
- i915_gem_context_unreference(ring->last_context);
+ if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+ i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+ i915_gem_context_unreference(lctx);
i915_gem_context_reference(dctx);
ring->last_context = dctx;
}
@@ -429,7 +447,7 @@ void i915_gem_context_fini(struct drm_device *dev)
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
int i;
- if (dctx->obj) {
+ if (dctx->legacy_hw_ctx.rcs_state) {
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
@@ -444,13 +462,13 @@ void i915_gem_context_fini(struct drm_device *dev)
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
- WARN_ON(dctx->obj->active);
- i915_gem_object_ggtt_unpin(dctx->obj);
+ WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+ i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
- i915_gem_object_ggtt_unpin(dctx->obj);
+ i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -570,7 +588,7 @@ mi_set_context(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -602,16 +620,16 @@ static int do_switch(struct intel_engine_cs *ring,
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
- BUG_ON(from->obj == NULL);
- BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+ BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+ BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
- if (from == to && from->last_ring == ring && !to->remap_slice)
+ if (from == to && !to->remap_slice)
return 0;
/* Trying to pin first makes error handling easier. */
if (ring == &dev_priv->ring[RCS]) {
- ret = i915_gem_obj_ggtt_pin(to->obj,
+ ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(ring->dev), 0);
if (ret)
return ret;
@@ -644,17 +662,17 @@ static int do_switch(struct intel_engine_cs *ring,
*
* XXX: We need a real interface to do this instead of trickery.
*/
- ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
if (ret)
goto unpin_out;
- if (!to->obj->has_global_gtt_mapping) {
- struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+ if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
&dev_priv->gtt.base);
- vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+ vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
}
- if (!to->is_initialized || i915_gem_context_is_default(to))
+ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
ret = mi_set_context(ring, to, hw_flags);
@@ -680,8 +698,8 @@ static int do_switch(struct intel_engine_cs *ring,
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+ from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -689,21 +707,20 @@ static int do_switch(struct intel_engine_cs *ring,
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->obj->dirty = 1;
- BUG_ON(from->obj->ring != ring);
+ from->legacy_hw_ctx.rcs_state->dirty = 1;
+ BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->obj);
+ i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(from);
}
- uninitialized = !to->is_initialized && from == NULL;
- to->is_initialized = true;
+ uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+ to->legacy_hw_ctx.initialized = true;
done:
i915_gem_context_reference(to);
ring->last_context = to;
- to->last_ring = ring;
if (uninitialized) {
ret = i915_gem_render_state_init(ring);
@@ -715,7 +732,7 @@ done:
unpin_out:
if (ring->id == RCS)
- i915_gem_object_ggtt_unpin(to->obj);
+ i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
@@ -736,7 +753,7 @@ int i915_switch_context(struct intel_engine_cs *ring,
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (to->obj == NULL) { /* We have the fake context */
+ if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (to != ring->last_context) {
i915_gem_context_reference(to);
if (ring->last_context)
@@ -774,7 +791,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- args->ctx_id = ctx->id;
+ args->ctx_id = ctx->user_handle;
DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
return 0;
@@ -788,7 +805,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct intel_context *ctx;
int ret;
- if (args->ctx_id == DEFAULT_CONTEXT_ID)
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
ret = i915_mutex_lock_interruptible(dev);
@@ -801,7 +818,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(ctx);
}
- idr_remove(&ctx->file_priv->context_idr, ctx->id);
+ idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
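
Since the patch renames ctx->id to ctx->user_handle and moves the backing object under legacy_hw_ctx, a tiny standalone model of the new default-context check (struct layout abbreviated, purely illustrative):

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_CONTEXT_HANDLE 0	/* must match execbuf2.rsvd1, as before */

struct fake_context {
	int user_handle;		/* userspace-visible id from idr_alloc() */
	struct {
		void *rcs_state;	/* render ring backing object */
		bool initialized;
	} legacy_hw_ctx;
};

static bool context_is_default(const struct fake_context *c)
{
	/* Same test as i915_gem_context_is_default(), on the renamed field. */
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

int main(void)
{
	struct fake_context def = { .user_handle = DEFAULT_CONTEXT_HANDLE };
	struct fake_context user = { .user_handle = 7 };

	printf("def=%d user=%d\n", context_is_default(&def), context_is_default(&user));
	return 0;
}
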
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a30133f93e8..60998fc4e5b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
- if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+ if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
- /* check for potential scanout */
- if (i915_gem_obj_ggtt_bound(obj) &&
- i915_gem_obj_to_ggtt(obj)->pin_count)
- intel_mark_fb_busy(obj, ring);
+
+ intel_fb_obj_invalidate(obj, ring);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1028,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+static int
+legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags)
+{
+ struct drm_clip_rect *cliprects = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 exec_len;
+ int instp_mode;
+ u32 instp_mask;
+ int i, ret = 0;
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+ DRM_DEBUG("execbuf with %u cliprects\n",
+ args->num_cliprects);
+ return -EINVAL;
+ }
+
+ cliprects = kcalloc(args->num_cliprects,
+ sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (copy_from_user(cliprects,
+ to_user_ptr(args->cliprects_ptr),
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto error;
+ }
+ } else {
+ if (args->DR4 == 0xffffffff) {
+ DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+ args->DR4 = 0;
+ }
+
+ if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+ DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ if (ret)
+ goto error;
+
+ ret = i915_switch_context(ring, ctx);
+ if (ret)
+ goto error;
+
+ instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ instp_mask = I915_EXEC_CONSTANTS_MASK;
+ switch (instp_mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (instp_mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("no rel constants on pre-gen4\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+ DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* The HW changed the meaning of this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (ring == &dev_priv->ring[RCS] &&
+ instp_mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto error;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = instp_mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto error;
+ }
+
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto error;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ goto error;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ return ret;
+ }
+
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+ i915_gem_execbuffer_move_to_active(vmas, ring);
+ i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+error:
+ kfree(cliprects);
+ return ret;
+}
+
/**
* Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned.
@@ -1087,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
- struct drm_clip_rect *cliprects = NULL;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
- u64 exec_start = args->batch_start_offset, exec_len;
- u32 mask, flags;
- int ret, mode, i;
+ u64 exec_start = args->batch_start_offset;
+ u32 flags;
+ int ret;
bool need_relocs;
if (!i915_gem_check_execbuffer(args))
@@ -1138,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- mask = I915_EXEC_CONSTANTS_MASK;
- switch (mode) {
- case I915_EXEC_CONSTANTS_REL_GENERAL:
- case I915_EXEC_CONSTANTS_ABSOLUTE:
- case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (mode != 0 && ring != &dev_priv->ring[RCS]) {
- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
- return -EINVAL;
- }
-
- if (mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev)->gen < 4) {
- DRM_DEBUG("no rel constants on pre-gen4\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen > 5 &&
- mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
- return -EINVAL;
- }
-
- /* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev)->gen >= 6)
- mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
- }
- break;
- default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
- return -EINVAL;
- }
-
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- if (args->num_cliprects != 0) {
- if (ring != &dev_priv->ring[RCS]) {
- DRM_DEBUG("clip rectangles are only valid with the render ring\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen >= 5) {
- DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
- return -EINVAL;
- }
-
- if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
- DRM_DEBUG("execbuf with %u cliprects\n",
- args->num_cliprects);
- return -EINVAL;
- }
-
- cliprects = kcalloc(args->num_cliprects,
- sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- if (copy_from_user(cliprects,
- to_user_ptr(args->cliprects_ptr),
- sizeof(*cliprects)*args->num_cliprects)) {
- ret = -EFAULT;
- goto pre_mutex_err;
- }
- } else {
- if (args->DR4 == 0xffffffff) {
- DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
- args->DR4 = 0;
- }
-
- if (args->DR1 || args->DR4 || args->cliprects_ptr) {
- DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
- return -EINVAL;
- }
- }
-
intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
@@ -1322,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
else
exec_start += i915_gem_obj_offset(batch_obj, vm);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
+ ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
+ args, &eb->vmas, batch_obj, exec_start, flags);
if (ret)
goto err;
- ret = i915_switch_context(ring, ctx);
- if (ret)
- goto err;
-
- if (ring == &dev_priv->ring[RCS] &&
- mode != dev_priv->relative_constants_mode) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- goto err;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, INSTPM);
- intel_ring_emit(ring, mask << 16 | mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = mode;
- }
-
- if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- ret = i915_reset_gen7_sol_offsets(dev, ring);
- if (ret)
- goto err;
- }
-
-
- exec_len = args->batch_len;
- if (cliprects) {
- for (i = 0; i < args->num_cliprects; i++) {
- ret = i915_emit_box(dev, &cliprects[i],
- args->DR1, args->DR4);
- if (ret)
- goto err;
-
- ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
- if (ret)
- goto err;
- }
- } else {
- ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
- if (ret)
- goto err;
- }
-
- trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
-
- i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
@@ -1387,8 +1413,6 @@ err:
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
- kfree(cliprects);
-
/* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */
intel_runtime_pm_put(dev_priv);
@@ -1525,7 +1549,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- struct drm_i915_gem_exec_object2 *user_exec_list =
+ struct drm_i915_gem_exec_object2 __user *user_exec_list =
to_user_ptr(args->buffers_ptr);
int i;
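
One detail worth calling out from the constants-mode code moved into legacy_ringbuffer_submission(): the INSTPM write packs a write-enable mask into the upper 16 bits (instp_mask << 16 | instp_mode). A standalone sketch of that masked-write encoding, with the mask and mode values made up for illustration:

#include <stdio.h>
#include <stdint.h>

/*
 * Many i915 registers are "masked": bits 31:16 select which of bits 15:0
 * actually get written. The values below are placeholders, not real modes.
 */
static uint32_t masked_write(uint16_t mask, uint16_t value)
{
	return ((uint32_t)mask << 16) | value;
}

int main(void)
{
	uint16_t instp_mask = 0x00c0;	/* example: bits the caller wants to update */
	uint16_t instp_mode = 0x0040;	/* example: new value for those bits */

	printf("MI_LOAD_REGISTER_IMM payload: 0x%08x\n",
	       masked_write(instp_mask, instp_mode));
	return 0;
}
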
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8b3cde703364..1411613f2174 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -63,6 +63,13 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
}
#endif
+ /* Early VLV doesn't have this */
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ dev->pdev->revision < 0xb) {
+ DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
+ return 0;
+ }
+
return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
}
@@ -110,7 +117,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -132,7 +139,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -156,7 +163,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 flags)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -164,7 +171,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
- pte |= BYT_PTE_WRITEABLE;
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
if (level != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
@@ -174,7 +182,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -187,7 +195,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -301,7 +309,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level, u32 unused)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -639,7 +647,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t pd_entry;
int pte, pde;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
@@ -941,7 +949,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -964,7 +972,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level, u32 flags)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -981,7 +989,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
- cache_level, true);
+ cache_level, true, flags);
+
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
@@ -1218,8 +1227,12 @@ ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ /* Currently applicable only to VLV */
+ if (vma->obj->gt_ro)
+ flags |= PTE_READ_ONLY;
+
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
- cache_level);
+ cache_level, flags);
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
@@ -1394,7 +1407,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level level)
+ enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -1402,7 +1415,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
- dma_addr_t addr = 0;
+ dma_addr_t addr = 0; /* shut up gcc */
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_dma_address(sg_iter.sg) +
@@ -1440,7 +1453,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level level)
+ enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -1448,11 +1461,11 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
- dma_addr_t addr;
+ dma_addr_t addr = 0;
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
+ iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
i++;
}
@@ -1462,9 +1475,10 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* of NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
- if (i != 0)
- WARN_ON(readl(&gtt_entries[i-1]) !=
- vm->pte_encode(addr, level, true));
+ if (i != 0) {
+ unsigned long gtt = readl(&gtt_entries[i-1]);
+ WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
+ }
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -1518,7 +1532,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -1567,6 +1581,10 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ /* Currently applicable only to VLV */
+ if (obj->gt_ro)
+ flags |= PTE_READ_ONLY;
+
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
@@ -1583,7 +1601,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
- cache_level);
+ cache_level, flags);
obj->has_global_gtt_mapping = 1;
}
}
@@ -1595,7 +1613,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->node.start,
- cache_level);
+ cache_level, flags);
vma->obj->has_aliasing_ppgtt_mapping = 1;
}
}
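
The pte_encode() hooks now take a flags word so VLV can honour obj->gt_ro via PTE_READ_ONLY; a standalone sketch of the BYT-style encoding, with the bit positions stated as assumptions rather than the real hardware layout:

#include <stdio.h>
#include <stdint.h>

/* Bit positions below are illustrative assumptions, not the hardware layout. */
#define PTE_VALID		(1u << 0)
#define PTE_WRITEABLE		(1u << 1)
#define PTE_SNOOPED		(1u << 2)
#define PTE_READ_ONLY_FLAG	(1u << 1)	/* caller flag, mirrors PTE_READ_ONLY */

static uint64_t byt_style_pte_encode(uint64_t addr, int cached,
				     int valid, uint32_t flags)
{
	uint64_t pte = valid ? PTE_VALID : 0;

	pte |= addr & ~0xfffull;		/* page-aligned address bits */

	/* Only mark the page writeable when the caller did not ask for RO. */
	if (!(flags & PTE_READ_ONLY_FLAG))
		pte |= PTE_WRITEABLE;
	if (cached)
		pte |= PTE_SNOOPED;
	return pte;
}

int main(void)
{
	printf("rw pte: 0x%llx\n",
	       (unsigned long long)byt_style_pte_encode(0x1000, 1, 1, 0));
	printf("ro pte: 0x%llx\n",
	       (unsigned long long)byt_style_pte_encode(0x1000, 1, 1, PTE_READ_ONLY_FLAG));
	return 0;
}
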
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 1b96a06be3cb..8d6f7c18c404 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -154,6 +154,7 @@ struct i915_vma {
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
+#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
@@ -197,7 +198,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
- bool valid); /* Create a valid PTE */
+ bool valid, u32 flags); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
@@ -205,7 +206,7 @@ struct i915_address_space {
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level cache_level);
+ enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
};
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 34894b573064..e60be3f552a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,64 +28,13 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-struct i915_render_state {
+struct render_state {
+ const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
- unsigned long ggtt_offset;
- u32 *batch;
- u32 size;
- u32 len;
+ u64 ggtt_offset;
+ int gen;
};
-static struct i915_render_state *render_state_alloc(struct drm_device *dev)
-{
- struct i915_render_state *so;
- struct page *page;
- int ret;
-
- so = kzalloc(sizeof(*so), GFP_KERNEL);
- if (!so)
- return ERR_PTR(-ENOMEM);
-
- so->obj = i915_gem_alloc_object(dev, 4096);
- if (so->obj == NULL) {
- ret = -ENOMEM;
- goto free;
- }
- so->size = 4096;
-
- ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
- if (ret)
- goto free_gem;
-
- BUG_ON(so->obj->pages->nents != 1);
- page = sg_page(so->obj->pages->sgl);
-
- so->batch = kmap(page);
- if (!so->batch) {
- ret = -ENOMEM;
- goto unpin;
- }
-
- so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-
- return so;
-unpin:
- i915_gem_object_ggtt_unpin(so->obj);
-free_gem:
- drm_gem_object_unreference(&so->obj->base);
-free:
- kfree(so);
- return ERR_PTR(ret);
-}
-
-static void render_state_free(struct i915_render_state *so)
-{
- kunmap(kmap_to_page(so->batch));
- i915_gem_object_ggtt_unpin(so->obj);
- drm_gem_object_unreference(&so->obj->base);
- kfree(so);
-}
-
static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
@@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return NULL;
}
-static int render_state_setup(const int gen,
- const struct intel_renderstate_rodata *rodata,
- struct i915_render_state *so)
+static int render_state_init(struct render_state *so, struct drm_device *dev)
{
- const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
- u32 reloc_index = 0;
- u32 * const d = so->batch;
- unsigned int i = 0;
int ret;
- if (!rodata || rodata->batch_items * 4 > so->size)
+ so->gen = INTEL_INFO(dev)->gen;
+ so->rodata = render_state_get_rodata(dev, so->gen);
+ if (so->rodata == NULL)
+ return 0;
+
+ if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
+ so->obj = i915_gem_alloc_object(dev, 4096);
+ if (so->obj == NULL)
+ return -ENOMEM;
+
+ ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+ if (ret)
+ goto free_gem;
+
+ so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+ return 0;
+
+free_gem:
+ drm_gem_object_unreference(&so->obj->base);
+ return ret;
+}
+
+static int render_state_setup(struct render_state *so)
+{
+ const struct intel_renderstate_rodata *rodata = so->rodata;
+ unsigned int i = 0, reloc_index = 0;
+ struct page *page;
+ u32 *d;
+ int ret;
+
ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
if (ret)
return ret;
+ page = sg_page(so->obj->pages->sgl);
+ d = kmap(page);
+
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
- if (reloc_index < rodata->reloc_items &&
- i * 4 == rodata->reloc[reloc_index]) {
-
- s += goffset & 0xffffffff;
-
- /* We keep batch offsets max 32bit */
- if (gen >= 8) {
+ if (i * 4 == rodata->reloc[reloc_index]) {
+ u64 r = s + so->ggtt_offset;
+ s = lower_32_bits(r);
+ if (so->gen >= 8) {
if (i + 1 >= rodata->batch_items ||
rodata->batch[i + 1] != 0)
return -EINVAL;
- d[i] = s;
- i++;
- s = (goffset & 0xffffffff00000000ull) >> 32;
+ d[i++] = s;
+ s = upper_32_bits(r);
}
reloc_index++;
}
- d[i] = s;
- i++;
+ d[i++] = s;
}
+ kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
if (ret)
return ret;
- if (rodata->reloc_items != reloc_index) {
- DRM_ERROR("not all relocs resolved, %d out of %d\n",
- reloc_index, rodata->reloc_items);
+ if (rodata->reloc[reloc_index] != -1) {
+ DRM_ERROR("only %d relocs resolved\n", reloc_index);
return -EINVAL;
}
- so->len = rodata->batch_items * 4;
-
return 0;
}
+static void render_state_fini(struct render_state *so)
+{
+ i915_gem_object_ggtt_unpin(so->obj);
+ drm_gem_object_unreference(&so->obj->base);
+}
+
int i915_gem_render_state_init(struct intel_engine_cs *ring)
{
- const int gen = INTEL_INFO(ring->dev)->gen;
- struct i915_render_state *so;
- const struct intel_renderstate_rodata *rodata;
+ struct render_state so;
int ret;
if (WARN_ON(ring->id != RCS))
return -ENOENT;
- rodata = render_state_get_rodata(ring->dev, gen);
- if (rodata == NULL)
- return 0;
+ ret = render_state_init(&so, ring->dev);
+ if (ret)
+ return ret;
- so = render_state_alloc(ring->dev);
- if (IS_ERR(so))
- return PTR_ERR(so);
+ if (so.rodata == NULL)
+ return 0;
- ret = render_state_setup(gen, rodata, so);
+ ret = render_state_setup(&so);
if (ret)
goto out;
ret = ring->dispatch_execbuffer(ring,
- i915_gem_obj_ggtt_offset(so->obj),
- so->len,
+ so.ggtt_offset,
+ so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, NULL, so->obj, NULL);
+ ret = __i915_add_request(ring, NULL, so.obj, NULL);
/* __i915_add_request moves object to inactive if it fails */
out:
- render_state_free(so);
+ render_state_fini(&so);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 7465ab0fd396..21c025a209c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -147,30 +147,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return base;
}
-static int i915_setup_compression(struct drm_device *dev, int size)
+static int find_compression_threshold(struct drm_device *dev,
+ struct drm_mm_node *node,
+ int size,
+ int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ int compression_threshold = 1;
int ret;
- compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
- if (!compressed_fb)
- goto err_llb;
+ /* HACK: This code depends on what we will do in *_enable_fbc. If that
+ * code changes, this code needs to change as well.
+ *
+ * The enable_fbc code will attempt to use one of our 2 compression
+ * thresholds, therefore, in that case, we only have 1 resort.
+ */
- /* Try to over-allocate to reduce reallocations and fragmentation */
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ /* Try to over-allocate to reduce reallocations and fragmentation. */
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
- if (ret)
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
- size >>= 1, 4096,
- DRM_MM_SEARCH_DEFAULT);
- if (ret)
+ if (ret == 0)
+ return compression_threshold;
+
+again:
+ /* HW's ability to limit the CFB is 1:4 */
+ if (compression_threshold > 4 ||
+ (fb_cpp == 2 && compression_threshold == 2))
+ return 0;
+
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
+ size >>= 1, 4096,
+ DRM_MM_SEARCH_DEFAULT);
+ if (ret && INTEL_INFO(dev)->gen <= 4) {
+ return 0;
+ } else if (ret) {
+ compression_threshold <<= 1;
+ goto again;
+ } else {
+ return compression_threshold;
+ }
+}
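find_compression_threshold above encodes a simple policy: try the over-allocated CFB at a 1:1 compression threshold, then repeatedly halve the allocation while doubling the threshold, giving up beyond the hardware's 1:4 limit (1:2 for 16bpp framebuffers). A stand-alone sketch of that policy, with the stolen-memory allocator replaced by a hypothetical stub and the gen<=4 early-out omitted:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for drm_mm_insert_node(): pretend only 'avail' bytes fit. */
    static bool try_alloc(int size, int avail)
    {
            return size <= avail;
    }

    static int find_threshold(int size, int fb_cpp, int avail)
    {
            int threshold = 1;

            if (try_alloc(size <<= 1, avail))       /* over-allocate first */
                    return threshold;

            for (;;) {
                    /* HW limits the CFB ratio to 1:4 (1:2 for 16bpp framebuffers). */
                    if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
                            return 0;
                    if (try_alloc(size >>= 1, avail))
                            return threshold;
                    threshold <<= 1;
            }
    }

    int main(void)
    {
            /* 8 MiB requested, only 3 MiB of stolen space left -> 1:4 threshold. */
            printf("threshold = %d\n", find_threshold(8 << 20, 4, 3 << 20));
            return 0;
    }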
+
+static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *uninitialized_var(compressed_llb);
+ int ret;
+
+ ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
+ size, fb_cpp);
+ if (!ret)
goto err_llb;
+ else if (ret > 1) {
+ DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
+
+ }
+
+ dev_priv->fbc.threshold = ret;
if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
@@ -184,13 +222,12 @@ static int i915_setup_compression(struct drm_device *dev, int size)
dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + compressed_fb->start);
+ dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->fbc.compressed_fb = compressed_fb;
- dev_priv->fbc.size = size;
+ dev_priv->fbc.size = size / dev_priv->fbc.threshold;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -199,14 +236,13 @@ static int i915_setup_compression(struct drm_device *dev, int size)
err_fb:
kfree(compressed_llb);
- drm_mm_remove_node(compressed_fb);
+ drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
- kfree(compressed_fb);
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -219,7 +255,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
- return i915_setup_compression(dev, size);
+ return i915_setup_compression(dev, size, fb_cpp);
}
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
@@ -229,10 +265,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
if (dev_priv->fbc.size == 0)
return;
- if (dev_priv->fbc.compressed_fb) {
- drm_mm_remove_node(dev_priv->fbc.compressed_fb);
- kfree(dev_priv->fbc.compressed_fb);
- }
+ drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
@@ -336,9 +369,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
kfree(obj->pages);
}
+
+static void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+ if (obj->stolen) {
+ drm_mm_remove_node(obj->stolen);
+ kfree(obj->stolen);
+ obj->stolen = NULL;
+ }
+}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
+ .release = i915_gem_object_release_stolen,
};
static struct drm_i915_gem_object *
@@ -496,13 +540,3 @@ err_out:
drm_gem_object_unreference(&obj->base);
return NULL;
}
-
-void
-i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
-{
- if (obj->stolen) {
- drm_mm_remove_node(obj->stolen);
- kfree(obj->stolen);
- obj->stolen = NULL;
- }
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 21ea92886a56..fe69fc837d9e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -40,19 +40,87 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
+ struct list_head linear;
struct drm_device *dev;
struct mm_struct *mm;
struct work_struct work;
unsigned long count;
unsigned long serial;
+ bool has_linear;
};
struct i915_mmu_object {
struct i915_mmu_notifier *mmu;
struct interval_tree_node it;
+ struct list_head link;
struct drm_i915_gem_object *obj;
+ bool is_linear;
};
+static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ unsigned long end;
+
+ mutex_lock(&dev->struct_mutex);
+ /* Cancel any active worker and force us to re-evaluate gup */
+ obj->userptr.work = NULL;
+
+ if (obj->pages != NULL) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_vma *vma, *tmp;
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ WARN_ON(ret && ret != -EIO);
+ }
+ WARN_ON(i915_gem_object_put_pages(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
+ end = obj->userptr.ptr + obj->base.size;
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ return end;
+}
+
+static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct i915_mmu_object *mmu;
+ unsigned long serial;
+
+restart:
+ serial = mn->serial;
+ list_for_each_entry(mmu, &mn->linear, link) {
+ struct drm_i915_gem_object *obj;
+
+ if (mmu->it.last < start || mmu->it.start > end)
+ continue;
+
+ obj = mmu->obj;
+ drm_gem_object_reference(&obj->base);
+ spin_unlock(&mn->lock);
+
+ cancel_userptr(obj);
+
+ spin_lock(&mn->lock);
+ if (serial != mn->serial)
+ goto restart;
+ }
+
+ return NULL;
+}
+
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct mm_struct *mm,
unsigned long start,
@@ -60,16 +128,18 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
{
struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it = NULL;
+ unsigned long next = start;
unsigned long serial = 0;
end--; /* interval ranges are inclusive, but invalidate range is exclusive */
- while (start < end) {
- struct drm_i915_gem_object *obj;
+ while (next < end) {
+ struct drm_i915_gem_object *obj = NULL;
- obj = NULL;
spin_lock(&mn->lock);
- if (serial == mn->serial)
- it = interval_tree_iter_next(it, start, end);
+ if (mn->has_linear)
+ it = invalidate_range__linear(mn, mm, start, end);
+ else if (serial == mn->serial)
+ it = interval_tree_iter_next(it, next, end);
else
it = interval_tree_iter_first(&mn->objects, start, end);
if (it != NULL) {
@@ -81,31 +151,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
if (obj == NULL)
return;
- mutex_lock(&mn->dev->struct_mutex);
- /* Cancel any active worker and force us to re-evaluate gup */
- obj->userptr.work = NULL;
-
- if (obj->pages != NULL) {
- struct drm_i915_private *dev_priv = to_i915(mn->dev);
- struct i915_vma *vma, *tmp;
- bool was_interruptible;
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
- int ret = i915_vma_unbind(vma);
- WARN_ON(ret && ret != -EIO);
- }
- WARN_ON(i915_gem_object_put_pages(obj));
-
- dev_priv->mm.interruptible = was_interruptible;
- }
-
- start = obj->userptr.ptr + obj->base.size;
-
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&mn->dev->struct_mutex);
+ next = cancel_userptr(obj);
}
}
@@ -150,7 +196,9 @@ i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
mmu->mm = mm;
mmu->objects = RB_ROOT;
mmu->count = 0;
- mmu->serial = 0;
+ mmu->serial = 1;
+ INIT_LIST_HEAD(&mmu->linear);
+ mmu->has_linear = false;
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mmu->mn, mm);
@@ -197,6 +245,17 @@ static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
mmu->serial = 1;
}
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
+{
+ struct i915_mmu_object *mn;
+
+ list_for_each_entry(mn, &mmu->linear, link)
+ if (mn->is_linear)
+ return true;
+
+ return false;
+}
+
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
struct i915_mmu_object *mn)
@@ -204,7 +263,11 @@ i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
lockdep_assert_held(&mmu->dev->struct_mutex);
spin_lock(&mmu->lock);
- interval_tree_remove(&mn->it, &mmu->objects);
+ list_del(&mn->link);
+ if (mn->is_linear)
+ mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
+ else
+ interval_tree_remove(&mn->it, &mmu->objects);
__i915_mmu_notifier_update_serial(mmu);
spin_unlock(&mmu->lock);
@@ -230,7 +293,6 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
*/
i915_gem_retire_requests(mmu->dev);
- /* Disallow overlapping userptr objects */
spin_lock(&mmu->lock);
it = interval_tree_iter_first(&mmu->objects,
mn->it.start, mn->it.last);
@@ -243,14 +305,22 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
* to flush their object references upon which the object will
 * be removed from the interval-tree, or the range is
* still in use by another client and the overlap is invalid.
+ *
+ * If we do have an overlap, we cannot use the interval tree
+ * for fast range invalidation.
*/
obj = container_of(it, struct i915_mmu_object, it)->obj;
- ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
- } else {
+ if (!obj->userptr.workers)
+ mmu->has_linear = mn->is_linear = true;
+ else
+ ret = -EAGAIN;
+ } else
interval_tree_insert(&mn->it, &mmu->objects);
+
+ if (ret == 0) {
+ list_add(&mn->link, &mmu->linear);
__i915_mmu_notifier_update_serial(mmu);
- ret = 0;
}
spin_unlock(&mmu->lock);
mutex_unlock(&mmu->dev->struct_mutex);
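When userptr ranges overlap, the interval tree can no longer be used, so invalidate_range__linear above walks the flat list and applies the same inclusive-interval overlap test by hand. A small illustrative sketch of that predicate:

    #include <stdbool.h>
    #include <stdio.h>

    /* Inclusive interval [it_start, it_last] vs. inclusive range [start, end]. */
    static bool intervals_overlap(unsigned long it_start, unsigned long it_last,
                                  unsigned long start, unsigned long end)
    {
            return !(it_last < start || it_start > end);
    }

    int main(void)
    {
            /* A 4 KiB object at 0x1000 overlaps an invalidation of [0x1800, 0x2000]. */
            printf("%d\n", intervals_overlap(0x1000, 0x1fff, 0x1800, 0x2000));
            return 0;
    }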
@@ -611,12 +681,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
* We impose several restrictions upon the memory being mapped
* into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
- * 2. It cannot overlap any other userptr object in the same address space.
- * 3. It must be normal system memory, not a pointer into another map of IO
+ * 2. It must be normal system memory, not a pointer into another map of IO
* space (e.g. it must not be a GTT mmapping of another object).
- * 4. We only allow a bo as large as we could in theory map into the GTT,
+ * 3. We only allow a bo as large as we could in theory map into the GTT,
* that is we limit the size to the total size of the GTT.
- * 5. The bo is marked as being snoopable. The backing pages are left
+ * 4. The bo is marked as being snoopable. The backing pages are left
* accessible directly by the CPU, but reads and writes by the GPU may
* incur the cost of a snoop (unless you have an LLC architecture).
*
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 66cf41765bf9..eab41f9390f8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -229,6 +229,8 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
return "wait";
case HANGCHECK_ACTIVE:
return "active";
+ case HANGCHECK_ACTIVE_LOOP:
+ return "active (loop)";
case HANGCHECK_KICK:
return "kick";
case HANGCHECK_HUNG:
@@ -327,6 +329,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct drm_device *dev = error_priv->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
+ struct drm_i915_error_object *obj;
int i, j, offset, elt;
int max_hangcheck_score;
@@ -358,6 +361,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
+ if (INTEL_INFO(dev)->gen >= 8) {
+ for (i = 0; i < 4; i++)
+ err_printf(m, "GTIER gt %d: 0x%08x\n", i,
+ error->gtier[i]);
+ } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+ err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
@@ -395,8 +404,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->pinned_bo_count[0]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- struct drm_i915_error_object *obj;
-
obj = error->ring[i].batchbuffer;
if (obj) {
err_puts(m, dev_priv->ring[i].name);
@@ -459,6 +466,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
+ if ((obj = error->semaphore_obj)) {
+ err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ elt * 4,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ }
+ }
+
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -529,6 +548,7 @@ static void i915_error_state_free(struct kref *error_ref)
kfree(error->ring[i].requests);
}
+ i915_error_object_free(error->semaphore_obj);
kfree(error->active_bo);
kfree(error->overlay);
kfree(error->display);
@@ -746,7 +766,60 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
}
+
+static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ struct intel_engine_cs *ring,
+ struct drm_i915_error_ring *ering)
+{
+ struct intel_engine_cs *to;
+ int i;
+
+ if (!i915_semaphore_is_enabled(dev_priv->dev))
+ return;
+
+ if (!error->semaphore_obj)
+ error->semaphore_obj =
+ i915_error_object_create(dev_priv,
+ dev_priv->semaphore_obj,
+ &dev_priv->gtt.base);
+
+ for_each_ring(to, dev_priv, i) {
+ int idx;
+ u16 signal_offset;
+ u32 *tmp;
+
+ if (ring == to)
+ continue;
+
+ signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+ / 4;
+ tmp = error->semaphore_obj->pages[0];
+ idx = intel_ring_sync_index(ring, to);
+
+ ering->semaphore_mboxes[idx] = tmp[signal_offset];
+ ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+ }
+}
+
+static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *ring,
+ struct drm_i915_error_ring *ering)
+{
+ ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
+ ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
+ ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+ ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+
+ if (HAS_VEBOX(dev_priv->dev)) {
+ ering->semaphore_mboxes[2] =
+ I915_READ(RING_SYNC_2(ring->mmio_base));
+ ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ }
+}
+
static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
@@ -755,18 +828,10 @@ static void i915_record_ring_state(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
- ering->semaphore_mboxes[0]
- = I915_READ(RING_SYNC_0(ring->mmio_base));
- ering->semaphore_mboxes[1]
- = I915_READ(RING_SYNC_1(ring->mmio_base));
- ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
- ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
- }
-
- if (HAS_VEBOX(dev)) {
- ering->semaphore_mboxes[2] =
- I915_READ(RING_SYNC_2(ring->mmio_base));
- ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ if (INTEL_INFO(dev)->gen >= 8)
+ gen8_record_semaphore_state(dev_priv, error, ring, ering);
+ else
+ gen6_record_semaphore_state(dev_priv, ring, ering);
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -871,6 +936,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!i915_gem_obj_ggtt_bound(obj))
+ continue;
+
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
break;
@@ -895,7 +963,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].valid = true;
- i915_record_ring_state(dev, ring, &error->ring[i]);
+ i915_record_ring_state(dev, error, ring, &error->ring[i]);
request = i915_gem_find_active_request(ring);
if (request) {
@@ -1032,6 +1100,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
struct drm_i915_error_state *error)
{
struct drm_device *dev = dev_priv->dev;
+ int i;
/* General organization
* 1. Registers specific to a single generation
@@ -1043,7 +1112,8 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
/* 1: Registers specific to a single generation */
if (IS_VALLEYVIEW(dev)) {
- error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ error->gtier[0] = I915_READ(GTIER);
+ error->ier = I915_READ(VLV_IER);
error->forcewake = I915_READ(FORCEWAKE_VLV);
}
@@ -1076,16 +1146,18 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
if (HAS_HW_CONTEXTS(dev))
error->ccid = I915_READ(CCID);
- if (HAS_PCH_SPLIT(dev))
- error->ier = I915_READ(DEIER) | I915_READ(GTIER);
- else {
- if (IS_GEN2(dev))
- error->ier = I915_READ16(IER);
- else
- error->ier = I915_READ(IER);
+ if (INTEL_INFO(dev)->gen >= 8) {
+ error->ier = I915_READ(GEN8_DE_MISC_IER);
+ for (i = 0; i < 4; i++)
+ error->gtier[i] = I915_READ(GEN8_GT_IER(i));
+ } else if (HAS_PCH_SPLIT(dev)) {
+ error->ier = I915_READ(DEIER);
+ error->gtier[0] = I915_READ(GTIER);
+ } else if (IS_GEN2(dev)) {
+ error->ier = I915_READ16(IER);
+ } else if (!IS_VALLEYVIEW(dev)) {
+ error->ier = I915_READ(IER);
}
-
- /* 4: Everything else */
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c05c84f3f091..390ccc2a3096 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
if ((dev_priv->irq_mask & mask) != 0) {
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (!intel_irqs_enabled(dev_priv))
return;
if ((dev_priv->irq_mask & mask) != mask) {
@@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
dev_priv->gt_irq_mask &= ~interrupt_mask;
@@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
POSTING_READ(GTIMR);
}
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
}
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, 0);
}
@@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->pm_irq_mask;
@@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
}
}
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, mask);
}
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
@@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->pm_irq_mask;
@@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
}
}
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, mask);
}
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, 0);
}
@@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
I915_WRITE(SDEIMR, sdeimr);
@@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
return true;
}
+static void i915_digport_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, dig_port_work);
+ unsigned long irqflags;
+ u32 long_port_mask, short_port_mask;
+ struct intel_digital_port *intel_dig_port;
+ int i, ret;
+ u32 old_bits = 0;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ long_port_mask = dev_priv->long_hpd_port_mask;
+ dev_priv->long_hpd_port_mask = 0;
+ short_port_mask = dev_priv->short_hpd_port_mask;
+ dev_priv->short_hpd_port_mask = 0;
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ bool valid = false;
+ bool long_hpd = false;
+ intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+ continue;
+
+ if (long_port_mask & (1 << i)) {
+ valid = true;
+ long_hpd = true;
+ } else if (short_port_mask & (1 << i))
+ valid = true;
+
+ if (valid) {
+ ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+ if (ret == true) {
+ /* if we get true, fall back to old-school hpd */
+ old_bits |= (1 << intel_dig_port->base.hpd_pin);
+ }
+ }
+ }
+
+ if (old_bits) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dev_priv->hpd_event_bits |= old_bits;
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ schedule_work(&dev_priv->hotplug_work);
+ }
+}
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
@@ -1109,10 +1156,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
bool changed = false;
u32 hpd_event_bits;
- /* HPD irq before everything is fully set up. */
- if (!dev_priv->enable_hotplug_processing)
- return;
-
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -1122,6 +1165,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
dev_priv->hpd_event_bits = 0;
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
intel_encoder = intel_connector->encoder;
if (intel_encoder->hpd_pin > HPD_NONE &&
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
@@ -1152,6 +1197,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
intel_encoder = intel_connector->encoder;
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
if (intel_encoder->hot_plug)
@@ -1218,10 +1265,138 @@ static void notify_ring(struct drm_device *dev,
trace_i915_gem_request_complete(ring);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_notify_mmio_flip(ring);
+
wake_up_all(&ring->irq_queue);
i915_queue_hangcheck(dev);
}
+static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
+ struct intel_rps_ei *rps_ei)
+{
+ u32 cz_ts, cz_freq_khz;
+ u32 render_count, media_count;
+ u32 elapsed_render, elapsed_media, elapsed_time;
+ u32 residency = 0;
+
+ cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+ cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
+
+ render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
+ media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
+
+ if (rps_ei->cz_clock == 0) {
+ rps_ei->cz_clock = cz_ts;
+ rps_ei->render_c0 = render_count;
+ rps_ei->media_c0 = media_count;
+
+ return dev_priv->rps.cur_freq;
+ }
+
+ elapsed_time = cz_ts - rps_ei->cz_clock;
+ rps_ei->cz_clock = cz_ts;
+
+ elapsed_render = render_count - rps_ei->render_c0;
+ rps_ei->render_c0 = render_count;
+
+ elapsed_media = media_count - rps_ei->media_c0;
+ rps_ei->media_c0 = media_count;
+
+ /* Convert all the counters into a common unit of milliseconds */
+ elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
+ elapsed_render /= cz_freq_khz;
+ elapsed_media /= cz_freq_khz;
+
+ /*
+ * Calculate overall C0 residency percentage
+ * only if elapsed time is non-zero
+ */
+ if (elapsed_time) {
+ residency =
+ ((max(elapsed_render, elapsed_media) * 100)
+ / elapsed_time);
+ }
+
+ return residency;
+}
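vlv_c0_residency above boils down to: take whichever of the render/media C0 counters advanced more, convert it and the elapsed EI window to the same time unit, and express the ratio as a percentage. A worked stand-alone sketch with the unit conversion already applied (the values are illustrative; 90% matches VLV_RP_UP_EI_THRESHOLD defined later in this patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Percentage of the EI window spent in C0, using whichever well was busier. */
    static uint32_t c0_residency_pct(uint32_t elapsed_render_ms,
                                     uint32_t elapsed_media_ms,
                                     uint32_t elapsed_time_ms)
    {
            uint32_t busiest = elapsed_render_ms > elapsed_media_ms ?
                               elapsed_render_ms : elapsed_media_ms;

            if (!elapsed_time_ms)
                    return 0;
            return busiest * 100 / elapsed_time_ms;
    }

    int main(void)
    {
            /* 72 ms of render C0 in an 80 ms window -> 90%, i.e. at the up threshold. */
            printf("%u%%\n", c0_residency_pct(72, 15, 80));
            return 0;
    }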
+
+/**
+ * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
+ * busy-ness calculated from C0 counters of render & media power wells
+ * @dev_priv: DRM device private
+ *
+ */
+static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+{
+ u32 residency_C0_up = 0, residency_C0_down = 0;
+ u8 new_delay, adj;
+
+ dev_priv->rps.ei_interrupt_count++;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+
+ if (dev_priv->rps.up_ei.cz_clock == 0) {
+ vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
+ vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
+ return dev_priv->rps.cur_freq;
+ }
+
+
+ /*
+ * To throttle down, C0 residency should be less than the down threshold
+ * for continuous EI intervals. So calculate the down EI counters
+ * once every VLV_INT_COUNT_FOR_DOWN_EI interrupts.
+ */
+ if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+
+ dev_priv->rps.ei_interrupt_count = 0;
+
+ residency_C0_down = vlv_c0_residency(dev_priv,
+ &dev_priv->rps.down_ei);
+ } else {
+ residency_C0_up = vlv_c0_residency(dev_priv,
+ &dev_priv->rps.up_ei);
+ }
+
+ new_delay = dev_priv->rps.cur_freq;
+
+ adj = dev_priv->rps.last_adj;
+ /* C0 residency is greater than UP threshold. Increase Frequency */
+ if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else
+ adj = 1;
+
+ if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
+ new_delay = dev_priv->rps.cur_freq + adj;
+
+ /*
+ * For better performance, jump directly
+ * to RPe if we're below it.
+ */
+ if (new_delay < dev_priv->rps.efficient_freq)
+ new_delay = dev_priv->rps.efficient_freq;
+
+ } else if (!dev_priv->rps.ei_interrupt_count &&
+ (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
+ if (adj < 0)
+ adj *= 2;
+ else
+ adj = -1;
+ /*
+ * This means C0 residency is less than the down threshold over
+ * a period of VLV_INT_COUNT_FOR_DOWN_EI intervals, so reduce the frequency.
+ */
+ if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
+ new_delay = dev_priv->rps.cur_freq + adj;
+ }
+
+ return new_delay;
+}
+
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -1232,11 +1407,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- if (IS_BROADWELL(dev_priv->dev))
- bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ if (INTEL_INFO(dev_priv->dev)->gen >= 8)
+ gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
else {
/* Make sure not to corrupt PMIMR state used by ringbuffer */
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1252,8 +1427,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
adj *= 2;
- else
- adj = 1;
+ else {
+ /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
+ }
new_delay = dev_priv->rps.cur_freq + adj;
/*
@@ -1268,11 +1445,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+ new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
- else
- adj = -1;
+ else {
+ /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
+ }
new_delay = dev_priv->rps.cur_freq + adj;
} else { /* unknown event */
new_delay = dev_priv->rps.cur_freq;
@@ -1372,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+ gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1386,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
return;
spin_lock(&dev_priv->irq_lock);
- ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+ gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);
iir &= GT_PARITY_ERROR(dev);
@@ -1441,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1458,6 +1639,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(0));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
@@ -1465,7 +1647,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[RCS]);
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
- I915_WRITE(GEN8_GT_IIR(0), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@@ -1473,6 +1654,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(1));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
@@ -1480,7 +1662,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS2]);
- I915_WRITE(GEN8_GT_IIR(1), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@@ -1488,10 +1669,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & GEN8_GT_PM_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(2));
if (tmp & dev_priv->pm_rps_events) {
- ret = IRQ_HANDLED;
- gen8_rps_irq_handler(dev_priv, tmp);
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
+ ret = IRQ_HANDLED;
+ gen8_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@@ -1499,11 +1680,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VECS]);
- I915_WRITE(GEN8_GT_IIR(3), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
@@ -1514,23 +1695,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
+static int ilk_port_to_hotplug_shift(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ case PORT_E:
+ default:
+ return -1;
+ case PORT_B:
+ return 0;
+ case PORT_C:
+ return 8;
+ case PORT_D:
+ return 16;
+ }
+}
+
+static int g4x_port_to_hotplug_shift(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ case PORT_E:
+ default:
+ return -1;
+ case PORT_B:
+ return 17;
+ case PORT_C:
+ return 19;
+ case PORT_D:
+ return 21;
+ }
+}
+
+static inline enum port get_port_from_pin(enum hpd_pin pin)
+{
+ switch (pin) {
+ case HPD_PORT_B:
+ return PORT_B;
+ case HPD_PORT_C:
+ return PORT_C;
+ case HPD_PORT_D:
+ return PORT_D;
+ default:
+ return PORT_A; /* no hpd */
+ }
+}
+
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
+ u32 dig_hotplug_reg,
const u32 *hpd)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
+ enum port port;
bool storm_detected = false;
+ bool queue_dig = false, queue_hp = false;
+ u32 dig_shift;
+ u32 dig_port_mask = 0;
if (!hotplug_trigger)
return;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_trigger);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg);
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
+ if (!(hpd[i] & hotplug_trigger))
+ continue;
+
+ port = get_port_from_pin(i);
+ if (port && dev_priv->hpd_irq_port[port]) {
+ bool long_hpd;
+
+ if (IS_G4X(dev)) {
+ dig_shift = g4x_port_to_hotplug_shift(port);
+ long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+ } else {
+ dig_shift = ilk_port_to_hotplug_shift(port);
+ long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+ }
+
+ DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
+ /* For long HPD pulses we want the digital queue to run,
+ but we still want HPD storm detection to function. */
+ if (long_hpd) {
+ dev_priv->long_hpd_port_mask |= (1 << port);
+ dig_port_mask |= hpd[i];
+ } else {
+ /* for short HPD just trigger the digital queue */
+ dev_priv->short_hpd_port_mask |= (1 << port);
+ hotplug_trigger &= ~hpd[i];
+ }
+ queue_dig = true;
+ }
+ }
+ for (i = 1; i < HPD_NUM_PINS; i++) {
if (hpd[i] & hotplug_trigger &&
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
/*
@@ -1550,7 +1812,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
- dev_priv->hpd_event_bits |= (1 << i);
+ if (!(dig_port_mask & hpd[i])) {
+ dev_priv->hpd_event_bits |= (1 << i);
+ queue_hp = true;
+ }
+
if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
dev_priv->hpd_stats[i].hpd_last_jiffies
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -1579,7 +1845,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
* queue for otherwise the flush_work in the pageflip code will
* deadlock.
*/
- schedule_work(&dev_priv->hotplug_work);
+ if (queue_dig)
+ queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
+ if (queue_hp)
+ schedule_work(&dev_priv->hotplug_work);
}
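The long-versus-short classification in intel_hpd_irq_handler comes down to testing a single long-detect bit per digital port at a platform-specific shift. A minimal sketch of that decode, using hypothetical register values and a stand-in for the real long-detect bit:

    #include <stdbool.h>
    #include <stdio.h>

    #define HOTPLUG_LONG_DETECT 0x2 /* stands in for PORTB_HOTPLUG_LONG_DETECT */

    /* ILK-style shifts: port B -> 0, C -> 8, D -> 16 (A/E have no digital HPD). */
    static int ilk_port_shift(char port)
    {
            switch (port) {
            case 'B': return 0;
            case 'C': return 8;
            case 'D': return 16;
            default:  return -1;
            }
    }

    static bool is_long_hpd(unsigned int dig_hotplug_reg, char port)
    {
            int shift = ilk_port_shift(port);

            return shift >= 0 &&
                   ((dig_hotplug_reg >> shift) & HOTPLUG_LONG_DETECT);
    }

    int main(void)
    {
            /* Bit 9 set: a long pulse on port C, so route it to the digital-port worker. */
            printf("port C long HPD: %d\n", is_long_hpd(0x00000200, 'C'));
            return 0;
    }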
static void gmbus_irq_handler(struct drm_device *dev)
@@ -1700,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1809,26 +2078,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- if (IS_G4X(dev)) {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+ if (hotplug_status) {
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ /*
+ * Make sure hotplug status is cleared before we clear IIR, or else we
+ * may miss hotplug events.
+ */
+ POSTING_READ(PORT_HOTPLUG_STAT);
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
- } else {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+ if (IS_G4X(dev)) {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
- }
+ intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
+ } else {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
- hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev);
+ intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
+ }
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- /*
- * Make sure hotplug status is cleared before we clear IIR, or else we
- * may miss hotplug events.
- */
- POSTING_READ(PORT_HOTPLUG_STAT);
+ if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+ hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev);
+ }
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1839,29 +2110,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
while (true) {
- iir = I915_READ(VLV_IIR);
+ /* Find, clear, then process each source of interrupt */
+
gt_iir = I915_READ(GTIIR);
+ if (gt_iir)
+ I915_WRITE(GTIIR, gt_iir);
+
pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir)
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+ iir = I915_READ(VLV_IIR);
+ if (iir) {
+ /* Consume port before clearing IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+ I915_WRITE(VLV_IIR, iir);
+ }
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
goto out;
ret = IRQ_HANDLED;
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
- valleyview_pipestat_irq_handler(dev, iir);
-
- /* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
-
+ if (gt_iir)
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
-
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
- I915_WRITE(VLV_IIR, iir);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ valleyview_pipestat_irq_handler(dev, iir);
}
out:
@@ -1882,21 +2160,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (master_ctl == 0 && iir == 0)
break;
+ ret = IRQ_HANDLED;
+
I915_WRITE(GEN8_MASTER_IRQ, 0);
- gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+ /* Find, clear, then process each source of interrupt */
- valleyview_pipestat_irq_handler(dev, iir);
+ if (iir) {
+ /* Consume port before clearing IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+ I915_WRITE(VLV_IIR, iir);
+ }
- /* Consume port. Then clear IIR or we'll miss events */
- i9xx_hpd_irq_handler(dev);
+ gen8_gt_irq_handler(dev, dev_priv, master_ctl);
- I915_WRITE(VLV_IIR, iir);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ valleyview_pipestat_irq_handler(dev, iir);
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
-
- ret = IRQ_HANDLED;
}
return ret;
@@ -1907,8 +2191,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
+ u32 dig_hotplug_reg;
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -2014,8 +2302,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+ intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2132,6 +2424,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
}
}
+/*
+ * To handle irqs with the minimum potential races with fresh interrupts, we:
+ * 1 - Disable Master Interrupt Control.
+ * 2 - Find the source(s) of the interrupt.
+ * 3 - Clear the Interrupt Identity bits (IIR).
+ * 4 - Process the interrupt(s) that had bits set in the IIRs.
+ * 5 - Re-enable Master Interrupt Control.
+ */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -2159,32 +2459,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
POSTING_READ(SDEIER);
}
+ /* Find, clear, then process each source of interrupt */
+
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
+ I915_WRITE(GTIIR, gt_iir);
+ ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 6)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
- I915_WRITE(GTIIR, gt_iir);
- ret = IRQ_HANDLED;
}
de_iir = I915_READ(DEIIR);
if (de_iir) {
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 7)
ivb_display_irq_handler(dev, de_iir);
else
ilk_display_irq_handler(dev, de_iir);
- I915_WRITE(DEIIR, de_iir);
- ret = IRQ_HANDLED;
}
if (INTEL_INFO(dev)->gen >= 6) {
u32 pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
- gen6_rps_irq_handler(dev_priv, pm_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
+ gen6_rps_irq_handler(dev_priv, pm_iir);
}
}
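The reordering throughout these handlers follows the five-step recipe documented above ironlake_irq_handler: latch the IIR, ack it by writing it back before processing, then handle the latched bits so that a fresh event re-asserts the interrupt instead of being lost. A compact stand-alone illustration of that ack-then-handle pattern (the register accessors are hypothetical stubs; the master-interrupt disable/enable steps are omitted):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_iir = 0x00000011;  /* pretend two events are pending */

    static uint32_t read_iir(void)          { return fake_iir; }
    static void write_iir(uint32_t val)     { fake_iir &= ~val; }  /* write-1-to-clear */
    static void handle_events(uint32_t iir) { printf("handling 0x%08x\n", iir); }

    int main(void)
    {
            uint32_t iir = read_iir();      /* 2: find the source(s) */

            if (iir) {
                    write_iir(iir);         /* 3: clear IIR before processing... */
                    handle_events(iir);     /* 4: ...so a new event re-asserts instead of being lost */
            }
            printf("IIR now 0x%08x\n", fake_iir);
            return 0;
    }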
@@ -2215,36 +2517,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
+ /* Find, clear, then process each source of interrupt */
+
ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
- if (tmp & GEN8_DE_MISC_GSE)
- intel_opregion_asle_intr(dev);
- else if (tmp)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
- else
- DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
-
if (tmp) {
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
ret = IRQ_HANDLED;
+ if (tmp & GEN8_DE_MISC_GSE)
+ intel_opregion_asle_intr(dev);
+ else
+ DRM_ERROR("Unexpected DE Misc interrupt\n");
}
+ else
+ DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
- if (tmp & GEN8_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
- else if (tmp)
- DRM_ERROR("Unexpected DE Port interrupt\n");
- else
- DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
-
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
+ if (tmp & GEN8_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+ else
+ DRM_ERROR("Unexpected DE Port interrupt\n");
}
+ else
+ DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
}
for_each_pipe(pipe) {
@@ -2254,33 +2556,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
continue;
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
- if (pipe_iir & GEN8_PIPE_VBLANK)
- intel_pipe_handle_vblank(dev, pipe);
-
- if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (pipe_iir) {
+ ret = IRQ_HANDLED;
+ I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ if (pipe_iir & GEN8_PIPE_VBLANK)
+ intel_pipe_handle_vblank(dev, pipe);
- if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev, pipe);
+ if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
- if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+ hsw_pipe_crc_irq_handler(dev, pipe);
- if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
- pipe_name(pipe),
- pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
- }
+ if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+ false))
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+ }
- if (pipe_iir) {
- ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+ DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ pipe_name(pipe),
+ pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+ }
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
@@ -2292,13 +2593,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
* on older pch-split platforms. But this needs testing.
*/
u32 pch_iir = I915_READ(SDEIIR);
-
- cpt_irq_handler(dev, pch_iir);
-
if (pch_iir) {
I915_WRITE(SDEIIR, pch_iir);
ret = IRQ_HANDLED;
- }
+ cpt_irq_handler(dev, pch_iir);
+ } else
+ DRM_ERROR("The master control interrupt lied (SDE)!\n");
+
}
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2753,12 +3054,7 @@ static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
if (INTEL_INFO(dev)->gen >= 8) {
- /*
- * FIXME: gen8 semaphore support - currently we don't emit
- * semaphores on bdw anyway, but this needs to be addressed when
- * we merge that code.
- */
- return false;
+ return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
@@ -2767,19 +3063,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
}
static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
int i;
if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
- /*
- * FIXME: gen8 semaphore support - currently we don't emit
- * semaphores on bdw anyway, but this needs to be addressed when
- * we merge that code.
- */
- return NULL;
+ for_each_ring(signaller, dev_priv, i) {
+ if (ring == signaller)
+ continue;
+
+ if (offset == signaller->semaphore.signal_ggtt[ring->id])
+ return signaller;
+ }
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
@@ -2792,8 +3089,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
}
}
- DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
- ring->id, ipehr);
+ DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+ ring->id, ipehr, offset);
return NULL;
}
@@ -2803,7 +3100,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, head;
- int i;
+ u64 offset = 0;
+ int i, backwards;
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
@@ -2812,13 +3110,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
/*
* HEAD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX. But limit it to just 3
- * dwords. Note that we don't care about ACTHD here since that might
+ * or 4 dwords depending on the semaphore wait command size.
+ * Note that we don't care about ACTHD here since that might
 * point at a batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
- for (i = 4; i; --i) {
+ for (i = backwards; i; --i) {
/*
* Be paranoid and presume the hw has gone off into the wild -
* our ring is smaller than what the hardware (and hence
@@ -2838,7 +3138,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
return NULL;
*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
- return semaphore_wait_to_signaller_ring(ring, ipehr);
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+ offset = ioread32(ring->buffer->virtual_start + head + 12);
+ offset <<= 32;
+ offset = ioread32(ring->buffer->virtual_start + head + 8);
+ }
+ return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
static int semaphore_passed(struct intel_engine_cs *ring)
@@ -2884,8 +3189,14 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
- if (ring->hangcheck.acthd != acthd)
- return HANGCHECK_ACTIVE;
+ if (acthd != ring->hangcheck.acthd) {
+ if (acthd > ring->hangcheck.max_acthd) {
+ ring->hangcheck.max_acthd = acthd;
+ return HANGCHECK_ACTIVE;
+ }
+
+ return HANGCHECK_ACTIVE_LOOP;
+ }
if (IS_GEN2(dev))
return HANGCHECK_HUNG;
@@ -2996,8 +3307,9 @@ static void i915_hangcheck_elapsed(unsigned long data)
switch (ring->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
- break;
case HANGCHECK_ACTIVE:
+ break;
+ case HANGCHECK_ACTIVE_LOOP:
ring->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
@@ -3017,6 +3329,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
*/
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
+
+ ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
}
ring->hangcheck.seqno = seqno;
@@ -3159,7 +3473,9 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
for_each_pipe(pipe)
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ if (intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
GEN5_IRQ_RESET(GEN8_DE_PORT_);
GEN5_IRQ_RESET(GEN8_DE_MISC_);
@@ -3168,6 +3484,18 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
+ ~dev_priv->de_irq_mask[PIPE_B]);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
+ ~dev_priv->de_irq_mask[PIPE_C]);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3492,8 +3820,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
for_each_pipe(pipe)
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
- de_pipe_enables);
+ if (intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ dev_priv->de_irq_mask[pipe],
+ de_pipe_enables);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}
@@ -4324,12 +4655,17 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */
- dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+ if (IS_VALLEYVIEW(dev))
+ /* WaGsvRC0ResidenncyMethod:VLV */
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
@@ -4339,6 +4675,9 @@ void intel_irq_init(struct drm_device *dev)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ /* Haven't installed the IRQ handler yet */
+ dev_priv->pm._irqs_disabled = true;
+
if (IS_GEN2(dev)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4426,7 +4765,9 @@ void intel_hpd_init(struct drm_device *dev)
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
connector->polled = intel_connector->polled;
- if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ if (intel_connector->mst_port)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
@@ -4444,7 +4785,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev->driver->irq_uninstall(dev);
- dev_priv->pm.irqs_disabled = true;
+ dev_priv->pm._irqs_disabled = true;
}
/* Restore interrupts so we can recover from runtime PM. */
@@ -4452,7 +4793,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->pm.irqs_disabled = false;
+ dev_priv->pm._irqs_disabled = false;
dev->driver->irq_preinstall(dev);
dev->driver->irq_postinstall(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d05a2afa17dc..7f84dd263ee8 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -48,6 +48,8 @@ struct i915_params i915 __read_mostly = {
.disable_display = 0,
.enable_cmd_parser = 1,
.disable_vtd_wa = 0,
+ .use_mmio_flip = 0,
+ .mmio_debug = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -156,3 +158,12 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
+
+module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
+MODULE_PARM_DESC(use_mmio_flip,
+ "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
+
+module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+MODULE_PARM_DESC(mmio_debug,
+ "Enable the MMIO debug code (default: false). This may negatively "
+ "affect performance.");
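+
+/* Both parameters use 0600 permissions, so besides the usual module load
+ * options (e.g. i915.use_mmio_flip=1 on the kernel command line) they can
+ * be changed at runtime via /sys/module/i915/parameters/. */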
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a5bab61bfc00..e4d7607da2c4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,8 +29,8 @@
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
-#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
-#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
+#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
+ (pipe) == PIPE_B ? (b) : (c))
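+/* Same three-way selection as before, but with every argument parenthesized
+ * and the pipe compared against PIPE_A/PIPE_B directly instead of going
+ * through _PIPE()'s arithmetic for the first two registers. */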
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
@@ -240,7 +240,7 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
@@ -266,6 +266,11 @@
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
+#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
+#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
+#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_POLL (1<<15)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
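+/* Gen8 moves inter-ring synchronization from the gen6/gen7 MI_SEMAPHORE_MBOX
+ * mailbox (re-tagged above) to explicit signal/wait instructions. */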
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -360,6 +365,7 @@
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
@@ -525,10 +531,21 @@ enum punit_power_well {
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
+#define PUNIT_REG_CZ_TIMESTAMP 0xce
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
+#define PUNIT_GPU_STATUS_REG 0xdb
+#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
+#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
+#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff
+
+#define PUNIT_GPU_DUTYCYCLE_REG 0xdf
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff
+
#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
@@ -540,6 +557,11 @@ enum punit_power_well {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
+#define VLV_RP_UP_EI_THRESHOLD 90
+#define VLV_RP_DOWN_EI_THRESHOLD 70
+#define VLV_INT_COUNT_FOR_DOWN_EI 5
+
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3
@@ -574,6 +596,11 @@ enum punit_power_well {
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
+#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
+#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
+#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
+#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
+#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
/**
* DOC: DPIO
@@ -761,6 +788,8 @@ enum punit_power_well {
#define _VLV_PCS_DW8_CH0 0x8220
#define _VLV_PCS_DW8_CH1 0x8420
+#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20)
+#define CHV_PCS_USEDCLKCHANNEL (1 << 21)
#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
#define _VLV_PCS01_DW8_CH0 0x0220
@@ -869,6 +898,16 @@ enum punit_power_well {
#define DPIO_CHV_PROP_COEFF_SHIFT 0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+#define _CHV_CMN_DW5_CH0 0x8114
+#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
+#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
+#define CHV_BUFRIGHTENA1_FORCE (3 << 20)
+#define CHV_BUFRIGHTENA1_MASK (3 << 20)
+#define CHV_BUFLEFTENA1_DISABLE (0 << 22)
+#define CHV_BUFLEFTENA1_NORMAL (1 << 22)
+#define CHV_BUFLEFTENA1_FORCE (3 << 22)
+#define CHV_BUFLEFTENA1_MASK (3 << 22)
+
#define _CHV_CMN_DW13_CH0 0x8134
#define _CHV_CMN_DW0_CH1 0x8080
#define DPIO_CHV_S1_DIV_SHIFT 21
@@ -883,8 +922,21 @@ enum punit_power_well {
#define _CHV_CMN_DW1_CH1 0x8084
#define DPIO_AFC_RECAL (1 << 14)
#define DPIO_DCLKP_EN (1 << 13)
+#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */
#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
+#define _CHV_CMN_DW19_CH0 0x814c
+#define _CHV_CMN_DW6_CH1 0x8098
+#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
+
#define CHV_CMN_DW30 0x8178
#define DPIO_LRC_BYPASS (1 << 3)
@@ -933,6 +985,7 @@ enum punit_power_well {
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
+
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL_SWZCTL (1 << 0)
@@ -1170,6 +1223,8 @@ enum punit_power_well {
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT 12
+
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0
#define EMR 0x020b4
@@ -1570,11 +1625,10 @@ enum punit_power_well {
/*
* Clock control & power management
*/
-#define DPLL_A_OFFSET 0x6014
-#define DPLL_B_OFFSET 0x6018
-#define CHV_DPLL_C_OFFSET 0x6030
-#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
- dev_priv->info.display_mmio_offset)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
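+/* e.g. DPLL(PIPE_B) resolves to display_mmio_offset + 0x6018, while
+ * DPLL(PIPE_C) picks the CHV-only PLL at display_mmio_offset + 0x6030. */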
#define VGA0 0x6000
#define VGA1 0x6004
@@ -1662,11 +1716,10 @@ enum punit_power_well {
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
-#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
-#define CHV_DPLL_C_MD_OFFSET 0x603c
-#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
- dev_priv->info.display_mmio_offset)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -2231,7 +2284,7 @@ enum punit_power_well {
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
-
+#define CHV_CLK_CTL1 0x101100
#define VLV_CLK_CTL2 0x101104
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -2376,6 +2429,7 @@ enum punit_power_well {
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
+#define BDW_PSR_SINGLE_FRAME (1<<30)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
@@ -2533,8 +2587,14 @@ enum punit_power_well {
#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
+#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
+#define PORTB_HOTPLUG_INT_SHORT_PULSE (1 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
@@ -2588,7 +2648,7 @@ enum punit_power_well {
#define PORT_DFT_I9XX 0x61150
#define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X 0x61154
+#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
@@ -3803,47 +3863,47 @@ enum punit_power_well {
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_32 32
-#define DRAIN_LATENCY_PRECISION_16 16
+#define DRAIN_LATENCY_PRECISION_64 64
#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
-#define DDL_CURSORA_PRECISION_32 (1<<31)
-#define DDL_CURSORA_PRECISION_16 (0<<31)
+#define DDL_CURSORA_PRECISION_64 (1<<31)
+#define DDL_CURSORA_PRECISION_32 (0<<31)
#define DDL_CURSORA_SHIFT 24
-#define DDL_SPRITEB_PRECISION_32 (1<<23)
-#define DDL_SPRITEB_PRECISION_16 (0<<23)
+#define DDL_SPRITEB_PRECISION_64 (1<<23)
+#define DDL_SPRITEB_PRECISION_32 (0<<23)
#define DDL_SPRITEB_SHIFT 16
-#define DDL_SPRITEA_PRECISION_32 (1<<15)
-#define DDL_SPRITEA_PRECISION_16 (0<<15)
+#define DDL_SPRITEA_PRECISION_64 (1<<15)
+#define DDL_SPRITEA_PRECISION_32 (0<<15)
#define DDL_SPRITEA_SHIFT 8
-#define DDL_PLANEA_PRECISION_32 (1<<7)
-#define DDL_PLANEA_PRECISION_16 (0<<7)
+#define DDL_PLANEA_PRECISION_64 (1<<7)
+#define DDL_PLANEA_PRECISION_32 (0<<7)
#define DDL_PLANEA_SHIFT 0
#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
-#define DDL_CURSORB_PRECISION_32 (1<<31)
-#define DDL_CURSORB_PRECISION_16 (0<<31)
+#define DDL_CURSORB_PRECISION_64 (1<<31)
+#define DDL_CURSORB_PRECISION_32 (0<<31)
#define DDL_CURSORB_SHIFT 24
-#define DDL_SPRITED_PRECISION_32 (1<<23)
-#define DDL_SPRITED_PRECISION_16 (0<<23)
+#define DDL_SPRITED_PRECISION_64 (1<<23)
+#define DDL_SPRITED_PRECISION_32 (0<<23)
#define DDL_SPRITED_SHIFT 16
-#define DDL_SPRITEC_PRECISION_32 (1<<15)
-#define DDL_SPRITEC_PRECISION_16 (0<<15)
+#define DDL_SPRITEC_PRECISION_64 (1<<15)
+#define DDL_SPRITEC_PRECISION_32 (0<<15)
#define DDL_SPRITEC_SHIFT 8
-#define DDL_PLANEB_PRECISION_32 (1<<7)
-#define DDL_PLANEB_PRECISION_16 (0<<7)
+#define DDL_PLANEB_PRECISION_64 (1<<7)
+#define DDL_PLANEB_PRECISION_32 (0<<7)
#define DDL_PLANEB_SHIFT 0
#define VLV_DDL3 (VLV_DISPLAY_BASE + 0x70058)
-#define DDL_CURSORC_PRECISION_32 (1<<31)
-#define DDL_CURSORC_PRECISION_16 (0<<31)
+#define DDL_CURSORC_PRECISION_64 (1<<31)
+#define DDL_CURSORC_PRECISION_32 (0<<31)
#define DDL_CURSORC_SHIFT 24
-#define DDL_SPRITEF_PRECISION_32 (1<<23)
-#define DDL_SPRITEF_PRECISION_16 (0<<23)
+#define DDL_SPRITEF_PRECISION_64 (1<<23)
+#define DDL_SPRITEF_PRECISION_32 (0<<23)
#define DDL_SPRITEF_SHIFT 16
-#define DDL_SPRITEE_PRECISION_32 (1<<15)
-#define DDL_SPRITEE_PRECISION_16 (0<<15)
+#define DDL_SPRITEE_PRECISION_64 (1<<15)
+#define DDL_SPRITEE_PRECISION_32 (0<<15)
#define DDL_SPRITEE_SHIFT 8
-#define DDL_PLANEC_PRECISION_32 (1<<7)
-#define DDL_PLANEC_PRECISION_16 (0<<7)
+#define DDL_PLANEC_PRECISION_64 (1<<7)
+#define DDL_PLANEC_PRECISION_32 (0<<7)
#define DDL_PLANEC_SHIFT 0
/* FIFO watermark sizes etc */
@@ -4630,6 +4690,8 @@ enum punit_power_well {
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
+#define GEN7_L3CNTLREG2 0xB020
+#define GEN7_L3CNTLREG3 0xB024
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
@@ -4876,8 +4938,7 @@ enum punit_power_well {
#define _PCH_TRANSA_LINK_M2 0xe0048
#define _PCH_TRANSA_LINK_N2 0xe004c
-/* Per-transcoder DIP controls */
-
+/* Per-transcoder DIP controls (PCH) */
#define _VIDEO_DIP_CTL_A 0xe0200
#define _VIDEO_DIP_DATA_A 0xe0208
#define _VIDEO_DIP_GCP_A 0xe0210
@@ -4890,6 +4951,7 @@ enum punit_power_well {
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+/* Per-transcoder DIP controls (VLV) */
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
@@ -4898,12 +4960,19 @@ enum punit_power_well {
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
+#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
+#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
+#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
+
#define VLV_TVIDEO_DIP_CTL(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
+ VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
#define VLV_TVIDEO_DIP_DATA(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
+ VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
#define VLV_TVIDEO_DIP_GCP(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A 0x60200
@@ -5334,6 +5403,7 @@ enum punit_power_well {
#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
+#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
@@ -5471,6 +5541,12 @@ enum punit_power_well {
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define CHV_CZ_CLOCK_FREQ_MODE_200 200
+#define CHV_CZ_CLOCK_FREQ_MODE_267 267
+#define CHV_CZ_CLOCK_FREQ_MODE_320 320
+#define CHV_CZ_CLOCK_FREQ_MODE_333 333
+#define CHV_CZ_CLOCK_FREQ_MODE_400 400
+
#define GEN7_GT_SCRATCH_BASE 0x4F100
#define GEN7_GT_SCRATCH_REG_NUM 8
@@ -5481,6 +5557,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15)
+#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
+#define VLV_RENDER_RC0_COUNT_EN (1<<4)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
@@ -5489,6 +5567,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
+#define VLV_RENDER_C0_COUNT_REG 0x138118
+#define VLV_MEDIA_C0_COUNT_REG 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
@@ -5723,6 +5803,7 @@ enum punit_power_well {
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_PORT_SHIFT 28
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
@@ -5743,6 +5824,7 @@ enum punit_power_well {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
#define TRANS_DDI_BFI_ENABLE (1<<4)
/* DisplayPort Transport Control */
@@ -5752,6 +5834,7 @@ enum punit_power_well {
#define DP_TP_CTL_ENABLE (1<<31)
#define DP_TP_CTL_MODE_SST (0<<27)
#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_FORCE_ACT (1<<25)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
@@ -5766,15 +5849,19 @@ enum punit_power_well {
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
-#define DP_TP_STATUS_IDLE_DONE (1<<25)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
+#define DP_TP_STATUS_ACT_SENT (1<<24)
+#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
-/* Haswell */
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
@@ -5784,16 +5871,6 @@ enum punit_power_well {
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
-/* Broadwell */
-#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
-#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
-#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
-#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
-#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
-#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
-#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
-#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
-#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
@@ -5861,10 +5938,12 @@ enum punit_power_well {
/* WRPLL */
#define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060
+#define WRPLL_CTL(pll) ((pll) == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
-#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+#define WRPLL_PLL_SSC (1<<28)
+#define WRPLL_PLL_NON_SSC (2<<28)
+#define WRPLL_PLL_LCPLL (3<<28)
+#define WRPLL_PLL_REF_MASK (3<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
@@ -5883,6 +5962,7 @@ enum punit_power_well {
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
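+/* PORT_CLK_SEL_WRPLL(0) and PORT_CLK_SEL_WRPLL(1) evaluate to the same
+ * values as PORT_CLK_SEL_WRPLL1/2 above, i.e. (4<<29) and (5<<29). */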
#define PORT_CLK_SEL_NONE (7<<29)
@@ -5924,7 +6004,10 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
-#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using I915_WRITE. */
+#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_BDW 0x138144
#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
#define D_COMP_COMP_FORCE (1<<8)
#define D_COMP_COMP_DISABLE (1<<0)
@@ -6005,7 +6088,8 @@ enum punit_power_well {
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
+ _MIPIB_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + B */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
@@ -6047,18 +6131,20 @@ enum punit_power_well {
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
+ _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
/* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
+#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
-#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
-#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
+#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
+ _MIPIB_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -6066,12 +6152,14 @@ enum punit_power_well {
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
-#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
-#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
-#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
-#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
-#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
+#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
+ _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
+#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
+ _MIPIB_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -6105,9 +6193,10 @@ enum punit_power_well {
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
+ _MIPIB_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -6128,78 +6217,94 @@ enum punit_power_well {
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
+ _MIPIB_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
+ _MIPIB_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
+ _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
+ _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
-#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
-#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
+#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
+ _MIPIB_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
+ _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
-
-#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
-#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
-#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
-
-#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
-#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
-#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
-
-#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
-
-#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
+#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
+ _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
+#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
+ _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
+#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
+ _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
+#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
+ _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
-#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
-#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
-
-#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
-#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
-#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
-
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
-#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
-#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
-#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
+#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
+ _MIPIB_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -6208,27 +6313,31 @@ enum punit_power_well {
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
-#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
-#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
+#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
+ _MIPIB_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
-#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
-#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
+#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
+ _MIPIB_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
+ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
+ _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -6236,9 +6345,10 @@ enum punit_power_well {
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
-#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
-#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
+#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
+ _MIPIB_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -6248,28 +6358,33 @@ enum punit_power_well {
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
-#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
-#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
+#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
+ _MIPIB_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
-#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
-#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
+#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
+ _MIPIB_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
-#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
-#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
-#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
-#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
-#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
+#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
+ _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
+ _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
+#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
+ _MIPIB_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -6280,9 +6395,10 @@ enum punit_power_well {
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
-#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
+ _MIPIB_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -6298,16 +6414,18 @@ enum punit_power_well {
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
+ _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
-#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
-#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
+#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
+ _MIPIB_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -6318,34 +6436,41 @@ enum punit_power_well {
#define PREPARE_COUNT_MASK (0x3f << 0)
/* bits 31:0 */
-#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
-#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
-#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
-
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
+#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
+ _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
+ _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
-#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
+ _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
-#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
-#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
-#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
+ _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
+#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
+ _MIPIB_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -6359,9 +6484,10 @@ enum punit_power_well {
/* MIPI adapter registers */
-#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
-#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
-#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
+#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
+ _MIPIB_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -6373,50 +6499,52 @@ enum punit_power_well {
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)
-#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
-#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
-#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
+#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
+ _MIPIB_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
-#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
-#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
+#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
+ _MIPIB_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
-#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
+ _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
-#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
-#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
+#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
+ _MIPIB_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
-#define MIPI_READ_DATA_RETURN(pipe, n) \
- (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(tc, n) \
+ (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+ + 4 * (n)) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
-#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
-#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
+#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
+ _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
/* For UMS only (deprecated): */
#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 86ce39aad0ff..ae7fd8fc27f0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -47,22 +47,45 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
intel_runtime_pm_get(dev_priv);
- /* On VLV, residency time is in CZ units rather than 1.28us */
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
- u32 clkctl2;
+ u32 reg, czcount_30ns;
- clkctl2 = I915_READ(VLV_CLK_CTL2) >>
- CLK_CTL2_CZCOUNT_30NS_SHIFT;
- if (!clkctl2) {
- WARN(!clkctl2, "bogus CZ count value");
+ if (IS_CHERRYVIEW(dev))
+ reg = CHV_CLK_CTL1;
+ else
+ reg = VLV_CLK_CTL2;
+
+ czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+
+ if (!czcount_30ns) {
+ WARN(!czcount_30ns, "bogus CZ count value");
ret = 0;
goto out;
}
- units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+
+ units = 0;
+ div = 1000000ULL;
+
+ if (IS_CHERRYVIEW(dev)) {
+			/* Special case for 320 MHz */
+ if (czcount_30ns == 1) {
+ div = 10000000ULL;
+ units = 3125ULL;
+ } else {
+				/* CHV reports the count as one less than its actual value */
+ czcount_30ns += 1;
+ }
+ }
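+		/*
+		 * czcount_30ns == 1 appears to encode the 320 MHz CZ clock,
+		 * whose 3.125 ns period does not fit the 30 / czcount_30ns
+		 * scaling below, hence the fixed units of 3125 with a
+		 * divisor ten times the default.
+		 */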
+
+ if (units == 0)
+ units = DIV_ROUND_UP_ULL(30ULL * bias,
+ (u64)czcount_30ns);
+
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
- div = 1000000ULL * bias;
+ div = div * bias;
}
raw_time = I915_READ(reg) * units;
@@ -461,11 +484,20 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
mutex_unlock(&dev->struct_mutex);
if (attr == &dev_attr_gt_RP0_freq_mhz) {
- val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ else
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
- val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ else
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
- val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ else
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
} else {
BUG();
}
@@ -486,6 +518,9 @@ static const struct attribute *vlv_attrs[] = {
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
&dev_attr_vlv_rpe_freq_mhz.attr,
NULL,
};
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 827498e081df..a66955037e4e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -336,11 +336,12 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
- entry->min_brightness,
+ dev_priv->vbt.backlight.min_brightness,
backlight_data->level[panel_type]);
}
@@ -877,7 +878,7 @@ err:
/* error during parsing so set all pointers to null
* because of partial parsing */
- memset(dev_priv->vbt.dsi.sequence, 0, MIPI_SEQ_MAX);
+ memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
}
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 5a045d3bd77e..2efaf8e8d9c4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
+static void hsw_crt_pre_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
+ I915_WRITE(SPLL_CTL,
+ SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
+ POSTING_READ(SPLL_CTL);
+ udelay(20);
+}
+
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}
+
+static void hsw_crt_post_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+}
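+/*
+ * SPLL on/off now lives in these CRT encoder hooks; the refcounted
+ * intel_ddi_put_crtc_pll() path is removed further down in this patch,
+ * and the analog encoder appears to be the only SPLL user.
+ */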
+
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = 24;
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev)) {
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
pipe_config->port_clock = 135000 * 2;
+ }
return true;
}
@@ -632,8 +660,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
- intel_runtime_pm_get(dev_priv);
-
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name,
force);
@@ -685,8 +711,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
out:
intel_display_power_put(dev_priv, power_domain);
- intel_runtime_pm_put(dev_priv);
-
return status;
}
@@ -860,6 +884,8 @@ void intel_crt_init(struct drm_device *dev)
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
+ crt->base.pre_enable = hsw_crt_pre_enable;
+ crt->base.post_disable = hsw_crt_post_disable;
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
@@ -869,7 +895,7 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
if (!I915_HAS_HOTPLUG(dev))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b17b9c7c769f..5db0b5552e39 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = {
0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
+ 0x00AAAFFF, 0x000E000A,
0x00FFFFFF, 0x00020011,
0x00DB6FFF, 0x0005000F,
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
- 0x00FFFFFF, 0x000A000C,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};
@@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0007000E, /* DP parameters */
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
+ 0x80B2CFFF, 0x001B0002,
0x00FFFFFF, 0x000E000A,
0x00D75FFF, 0x00180004,
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
- 0x80FFFFFF, 0x001B0002,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB*/
};
@@ -116,7 +116,10 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ if (type == INTEL_OUTPUT_DP_MST) {
+ struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary;
+ return intel_dig_port->port;
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
@@ -277,7 +280,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
/* Configure Port Clock Select */
- I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
+ WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
@@ -364,6 +368,18 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+
+ intel_dp->DP = intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+}
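+/* Presumably called from the DP enable paths to load DDI_BUF_CTL with the
+ * saved port bits, the default 400mV/0dB vswing and the lane count before
+ * link training starts. */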
+
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
{
@@ -385,53 +401,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
return ret;
}
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t val;
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- plls->spll_refcount--;
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Disabling SPLL\n");
- val = I915_READ(SPLL_CTL);
- WARN_ON(!(val & SPLL_PLL_ENABLE));
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
- }
- break;
- case PORT_CLK_SEL_WRPLL1:
- plls->wrpll1_refcount--;
- if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 1\n");
- val = I915_READ(WRPLL_CTL1);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL1);
- }
- break;
- case PORT_CLK_SEL_WRPLL2:
- plls->wrpll2_refcount--;
- if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 2\n");
- val = I915_READ(WRPLL_CTL2);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL2);
- }
- break;
- }
-
- WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
- WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
- WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
-
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-}
-
#define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000)
@@ -592,9 +561,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
u32 wrpll;
wrpll = I915_READ(reg);
- switch (wrpll & SPLL_PLL_REF_MASK) {
- case SPLL_PLL_SSC:
- case SPLL_PLL_NON_SSC:
+ switch (wrpll & WRPLL_PLL_REF_MASK) {
+ case WRPLL_PLL_SSC:
+ case WRPLL_PLL_NON_SSC:
/*
* We could calculate spread here, but our checking
* code only cares about 5% accuracy, and spread is a max of
@@ -602,7 +571,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
*/
refclk = 135;
break;
- case SPLL_PLL_LCPLL:
+ case WRPLL_PLL_LCPLL:
refclk = LC_FREQ;
break;
default:
@@ -618,15 +587,14 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
-static void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- enum port port = intel_ddi_get_encoder_port(encoder);
int link_clock = 0;
u32 val, pll;
- val = I915_READ(PORT_CLK_SEL(port));
+ val = pipe_config->ddi_pll_sel;
switch (val & PORT_CLK_SEL_MASK) {
case PORT_CLK_SEL_LCPLL_810:
link_clock = 81000;
@@ -750,173 +718,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type;
- enum pipe pipe = intel_crtc->pipe;
int clock = intel_crtc->config.port_clock;
- intel_ddi_put_crtc_pll(crtc);
-
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_put_shared_dpll(intel_crtc);
- switch (intel_dp->link_bw) {
- case DP_LINK_BW_1_62:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
- break;
- case DP_LINK_BW_2_7:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
- break;
- case DP_LINK_BW_5_4:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
- break;
- default:
- DRM_ERROR("Link bandwidth %d unsupported\n",
- intel_dp->link_bw);
- return false;
- }
-
- } else if (type == INTEL_OUTPUT_HDMI) {
- uint32_t reg, val;
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_shared_dpll *pll;
+ uint32_t val;
unsigned p, n2, r2;
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- if (val == I915_READ(WRPLL_CTL1)) {
- DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL1;
- } else if (val == I915_READ(WRPLL_CTL2)) {
- DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL2;
- } else if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL1;
- } else if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL2;
- } else {
- DRM_ERROR("No WRPLLs available!\n");
- return false;
- }
-
- DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
- clock, p, n2, r2);
-
- if (reg == WRPLL_CTL1) {
- plls->wrpll1_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
- } else {
- plls->wrpll2_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
- }
+ intel_crtc->config.dpll_hw_state.wrpll = val;
- } else if (type == INTEL_OUTPUT_ANALOG) {
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
- pipe_name(pipe));
- plls->spll_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
- } else {
- DRM_ERROR("SPLL already in use\n");
+ pll = intel_get_shared_dpll(intel_crtc);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
return false;
}
- } else {
- WARN(1, "Invalid DDI encoder type %d\n", type);
- return false;
+ intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
}
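With the hunk above, PLL sharing moves to intel_get_shared_dpll()/intel_put_shared_dpll() instead of the hand-rolled WRPLL1/WRPLL2 refcounts. A minimal sketch of that sharing idea, assuming a two-entry pool keyed by the programmed state word (names and layout are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

struct fake_pll {
	const char *name;
	uint32_t state;		/* programmed divider word */
	int refcount;
};

static struct fake_pll pool[2] = { { "WRPLL 1" }, { "WRPLL 2" } };

/* Reuse a PLL already programmed with the same state, else grab a free one. */
static struct fake_pll *get_pll(uint32_t state)
{
	int i;

	for (i = 0; i < 2; i++)
		if (pool[i].refcount && pool[i].state == state) {
			pool[i].refcount++;
			return &pool[i];
		}
	for (i = 0; i < 2; i++)
		if (!pool[i].refcount) {
			pool[i].state = state;
			pool[i].refcount = 1;
			return &pool[i];
		}
	return NULL;	/* no PLL available */
}

static void put_pll(struct fake_pll *pll)
{
	if (pll && --pll->refcount == 0)
		printf("disabling %s\n", pll->name);
}

int main(void)
{
	struct fake_pll *a = get_pll(0x1234);
	struct fake_pll *b = get_pll(0x1234);	/* same state: shares WRPLL 1 */
	struct fake_pll *c = get_pll(0x5678);	/* different state: takes WRPLL 2 */

	printf("%s has %d users\n", a->name, a->refcount);	/* 2 */
	put_pll(a);
	put_pll(b);	/* last user: disables WRPLL 1 */
	put_pll(c);	/* last user: disables WRPLL 2 */
	return 0;
}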
-/*
- * To be called after intel_ddi_pll_select(). That one selects the PLL to be
- * used, this one actually enables the PLL.
- */
-void intel_ddi_pll_enable(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- int clock = crtc->config.port_clock;
- uint32_t reg, cur_val, new_val;
- int refcount;
- const char *pll_name;
- uint32_t enable_bit = (1 << 31);
- unsigned int p, n2, r2;
-
- BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
- BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
-
- switch (crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_LCPLL_2700:
- case PORT_CLK_SEL_LCPLL_1350:
- case PORT_CLK_SEL_LCPLL_810:
- /*
- * LCPLL should always be enabled at this point of the mode set
- * sequence, so nothing to do.
- */
- return;
-
- case PORT_CLK_SEL_SPLL:
- pll_name = "SPLL";
- reg = SPLL_CTL;
- refcount = plls->spll_refcount;
- new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
- SPLL_PLL_SSC;
- break;
-
- case PORT_CLK_SEL_WRPLL1:
- case PORT_CLK_SEL_WRPLL2:
- if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
- pll_name = "WRPLL1";
- reg = WRPLL_CTL1;
- refcount = plls->wrpll1_refcount;
- } else {
- pll_name = "WRPLL2";
- reg = WRPLL_CTL2;
- refcount = plls->wrpll2_refcount;
- }
-
- intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
- new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) |
- WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
-
- break;
-
- case PORT_CLK_SEL_NONE:
- WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
- return;
- default:
- WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
- return;
- }
-
- cur_val = I915_READ(reg);
-
- WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
- if (refcount == 1) {
- WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
- I915_WRITE(reg, new_val);
- POSTING_READ(reg);
- udelay(20);
- } else {
- WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
- }
-}
-
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -926,8 +758,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
int type = intel_encoder->type;
uint32_t temp;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->config.pipe_bpp) {
case 18:
@@ -949,6 +780,21 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
}
}
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ uint32_t temp;
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (state == true)
+ temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ else
+ temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -995,7 +841,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
+ if (IS_HASWELL(dev) &&
+ (intel_crtc->config.pch_pfit.enabled ||
+ intel_crtc->config.pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1026,7 +874,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+ if (intel_dp->is_mst) {
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+ } else
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ } else if (type == INTEL_OUTPUT_DP_MST) {
+ struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
+
+ if (intel_dp->is_mst) {
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+ } else
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
} else {
@@ -1043,7 +903,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
}
@@ -1082,8 +942,11 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
case TRANS_DDI_MODE_SELECT_DP_SST:
if (type == DRM_MODE_CONNECTOR_eDP)
return true;
- case TRANS_DDI_MODE_SELECT_DP_MST:
return (type == DRM_MODE_CONNECTOR_DisplayPort);
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+		/* if the transcoder is in MST state then the
+		 * connector isn't connected */
+ return false;
case TRANS_DDI_MODE_SELECT_FDI:
return (type == DRM_MODE_CONNECTOR_VGA);
@@ -1135,6 +998,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
if ((tmp & TRANS_DDI_PORT_MASK)
== TRANS_DDI_SELECT_PORT(port)) {
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
+ return false;
+
*pipe = i;
return true;
}
@@ -1146,76 +1012,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
return false;
}
-static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- uint32_t temp, ret;
- enum port port = I915_MAX_PORTS;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
- int i;
-
- if (cpu_transcoder == TRANSCODER_EDP) {
- port = PORT_A;
- } else {
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- temp &= TRANS_DDI_PORT_MASK;
-
- for (i = PORT_B; i <= PORT_E; i++)
- if (temp == TRANS_DDI_SELECT_PORT(i))
- port = i;
- }
-
- if (port == I915_MAX_PORTS) {
- WARN(1, "Pipe %c enabled on an unknown port\n",
- pipe_name(pipe));
- ret = PORT_CLK_SEL_NONE;
- } else {
- ret = I915_READ(PORT_CLK_SEL(port));
- DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
- "0x%08x\n", pipe_name(pipe), port_name(port),
- ret);
- }
-
- return ret;
-}
-
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
- struct intel_crtc *intel_crtc;
-
- dev_priv->ddi_plls.spll_refcount = 0;
- dev_priv->ddi_plls.wrpll1_refcount = 0;
- dev_priv->ddi_plls.wrpll2_refcount = 0;
-
- for_each_pipe(pipe) {
- intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (!intel_crtc->active) {
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
- continue;
- }
-
- intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
- pipe);
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- dev_priv->ddi_plls.spll_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL1:
- dev_priv->ddi_plls.wrpll1_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL2:
- dev_priv->ddi_plls.wrpll2_refcount++;
- break;
- }
- }
-}
-
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
@@ -1261,17 +1057,13 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_edp_panel_on(intel_dp);
}
- WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
+ WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
- intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ intel_ddi_init_dp_buf_reg(intel_encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
@@ -1418,10 +1210,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
}
}
+static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+ POSTING_READ(WRPLL_CTL(pll->id));
+ udelay(20);
+}
+
+static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ hw_state->wrpll = val;
+
+ return val & WRPLL_PLL_ENABLE;
+}
+
+static const char * const hsw_ddi_pll_names[] = {
+ "WRPLL 1",
+ "WRPLL 2",
+};
+
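The init loop below fills in per-PLL enable/disable/get_hw_state callbacks for the two WRPLLs instead of open-coding register writes at each call site. A self-contained sketch of that callback-in-struct pattern (struct layout and names are assumed for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a shared DPLL descriptor; field names assumed. */
struct fake_dpll {
	int id;
	const char *name;
	void (*enable)(struct fake_dpll *pll);
	void (*disable)(struct fake_dpll *pll);
	bool (*get_hw_state)(struct fake_dpll *pll);
};

static void hsw_pll_enable(struct fake_dpll *pll)
{
	printf("enable %s\n", pll->name);
}

static void hsw_pll_disable(struct fake_dpll *pll)
{
	printf("disable %s\n", pll->name);
}

static bool hsw_pll_get_hw_state(struct fake_dpll *pll)
{
	printf("%s: reading hw state\n", pll->name);
	return false;	/* pretend the PLL is currently off */
}

int main(void)
{
	static const char * const names[] = { "WRPLL 1", "WRPLL 2" };
	struct fake_dpll plls[2];
	int i;

	for (i = 0; i < 2; i++) {
		plls[i].id = i;
		plls[i].name = names[i];
		plls[i].enable = hsw_pll_enable;
		plls[i].disable = hsw_pll_disable;
		plls[i].get_hw_state = hsw_pll_get_hw_state;
	}

	plls[0].enable(&plls[0]);
	plls[1].get_hw_state(&plls[1]);
	return 0;
}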
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
+ int i;
+
+ dev_priv->num_shared_dpll = 2;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+ dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
+ dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ hsw_ddi_pll_get_hw_state;
+ }
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
@@ -1465,10 +1307,15 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
intel_wait_ddi_buf_idle(dev_priv, port);
}
- val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ val = DP_TP_CTL_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ if (intel_dp->is_mst)
+ val |= DP_TP_CTL_MODE_MST;
+ else {
+ val |= DP_TP_CTL_MODE_SST;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ }
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
@@ -1507,11 +1354,16 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- int type = intel_encoder->type;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ int type = intel_dig_port->base.type;
+
+ if (type != INTEL_OUTPUT_DISPLAYPORT &&
+ type != INTEL_OUTPUT_EDP &&
+ type != INTEL_OUTPUT_UNKNOWN) {
+ return;
+ }
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
- intel_dp_check_link_status(intel_dp);
+ intel_dp_hot_plug(intel_encoder);
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1663,15 +1515,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
- struct intel_connector *hdmi_connector = NULL;
- struct intel_connector *dp_connector = NULL;
bool init_hdmi, init_dp;
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (!init_dp && !init_hdmi) {
- DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+ DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
port_name(port));
init_hdmi = true;
init_dp = true;
@@ -1701,20 +1551,28 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
DDI_A_4_LANES);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_ddi_hot_plug;
- if (init_dp)
- dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
+ if (init_dp) {
+ if (!intel_ddi_init_dp_connector(intel_dig_port))
+ goto err;
+
+ intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dev_priv->hpd_irq_port[port] = intel_dig_port;
+ }
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
- if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
- hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
-
- if (!dp_connector && !hdmi_connector) {
- drm_encoder_cleanup(encoder);
- kfree(intel_dig_port);
+ if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+ goto err;
}
+
+ return;
+
+err:
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port);
}
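The reworked intel_ddi_init() above unwinds through a single err: label when either connector fails to initialize, rather than tracking two connector pointers. A small sketch of that goto-cleanup idiom (the allocation and init helpers are placeholders):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct port { int dummy; };

static bool init_dp_connector(struct port *p)   { return p != NULL; }
static bool init_hdmi_connector(struct port *p) { return p == NULL; }	/* simulate failure */

static void ddi_init(void)
{
	struct port *p = malloc(sizeof(*p));

	if (!p)
		return;

	if (!init_dp_connector(p))
		goto err;
	if (!init_hdmi_connector(p))
		goto err;

	printf("port initialized\n");
	return;

err:
	/* single cleanup path, mirroring drm_encoder_cleanup() + kfree() */
	printf("init failed, cleaning up\n");
	free(p);
}

int main(void)
{
	ddi_init();
	return 0;
}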
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0be855ddf45..018fb7222f60 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -39,12 +39,45 @@
#include "i915_trace.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
+/* Primary plane formats supported by all gen */
+#define COMMON_PRIMARY_FORMATS \
+ DRM_FORMAT_C8, \
+ DRM_FORMAT_RGB565, \
+ DRM_FORMAT_XRGB8888, \
+ DRM_FORMAT_ARGB8888
+
+/* Primary plane formats for gen <= 3 */
+static const uint32_t intel_primary_formats_gen2[] = {
+ COMMON_PRIMARY_FORMATS,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+};
+
+/* Primary plane formats for gen >= 4 */
+static const uint32_t intel_primary_formats_gen4[] = {
+ COMMON_PRIMARY_FORMATS, \
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+};
+
+/* Cursor formats */
+static const uint32_t intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
- ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
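The reindented DIV_ROUND_CLOSEST_ULL macro above rounds to the nearest integer by adding half the divisor before dividing. A quick user-space check of that arithmetic (do_div() replaced by ordinary 64-bit division for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the driver macro, but using plain division. */
#define DIV_ROUND_CLOSEST_U64(ll, d) (((ll) + (d) / 2) / (d))

int main(void)
{
	uint64_t a = 270000, b = 7;

	/* 270000/7 = 38571.43..., so the closest integer is 38571 */
	printf("%llu\n", (unsigned long long)DIV_ROUND_CLOSEST_U64(a, b));
	return 0;
}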
-static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -68,6 +101,14 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);
+static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
+{
+ if (!connector->mst_port)
+ return connector->encoder;
+ else
+ return &connector->mst_port->mst_encoders[pipe]->base;
+}
+
typedef struct {
int min, max;
} intel_range_t;
@@ -1061,11 +1102,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (HAS_PCH_LPT(dev_priv->dev)) {
- DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
- return;
- }
-
if (WARN (!pll,
"asserting DPLL %s with no DPLL\n", state_string(state)))
return;
@@ -1481,9 +1517,6 @@ static void intel_reset_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_VALLEYVIEW(dev))
- return;
-
if (IS_CHERRYVIEW(dev)) {
enum dpio_phy phy;
u32 val;
@@ -1505,26 +1538,6 @@ static void intel_reset_dpio(struct drm_device *dev)
I915_WRITE(DISPLAY_PHY_CONTROL,
PHY_COM_LANE_RESET_DEASSERT(phy, val));
}
-
- } else {
- /*
- * If DPIO has already been reset, e.g. by BIOS, just skip all
- * this.
- */
- if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
- return;
-
- /*
- * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
- * Need to assert and de-assert PHY SB reset by gating the
- * common lane power, then un-gating it.
- * Simply ungating isn't enough to reset the PHY enough to get
- * ports and lanes running.
- */
- __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
- false);
- __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
- true);
}
}
@@ -1712,6 +1725,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1749,6 +1773,9 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+ if (WARN_ON(pll == NULL))
+ return;
+
WARN_ON(!pll->refcount);
if (pll->active == 0) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
@@ -1790,12 +1817,14 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
}
WARN_ON(pll->on);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->enable(dev_priv, pll);
pll->on = true;
}
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1826,6 +1855,8 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->disable(dev_priv, pll);
pll->on = false;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -2172,6 +2203,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
u32 alignment;
int ret;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2228,6 +2261,8 @@ err_interruptible:
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
+ WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+
i915_gem_object_unpin_fence(obj);
i915_gem_object_unpin_from_display_plane(obj);
}
@@ -2314,6 +2349,7 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
goto out_unref_obj;
}
+ obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_KMS("plane fb obj %p\n", obj);
@@ -2331,7 +2367,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_crtc *c;
struct intel_crtc *i;
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
if (!intel_crtc->base.primary->fb)
return;
@@ -2352,13 +2388,17 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (c == &intel_crtc->base)
continue;
- if (!i->active || !c->primary->fb)
+ if (!i->active)
+ continue;
+
+ obj = intel_fb_obj(c->primary->fb);
+ if (obj == NULL)
continue;
- fb = to_intel_framebuffer(c->primary->fb);
- if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+ if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
+ obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
}
@@ -2371,16 +2411,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
@@ -2461,16 +2497,12 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
@@ -2546,7 +2578,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- intel_increase_pllclock(crtc);
+ intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
@@ -2601,7 +2633,7 @@ void intel_display_handle_reset(struct drm_device *dev)
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
- struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
@@ -2647,7 +2679,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_framebuffer *old_fb;
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
if (intel_crtc_has_pending_flip(crtc)) {
@@ -2669,9 +2704,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
- NULL);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
@@ -2711,7 +2747,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dev_priv->display.update_primary_plane(crtc, fb, x, y);
- old_fb = crtc->primary->fb;
+ if (intel_crtc->active)
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
@@ -2720,13 +2758,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -3587,7 +3624,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void intel_put_shared_dpll(struct intel_crtc *crtc)
+void intel_put_shared_dpll(struct intel_crtc *crtc)
{
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3607,7 +3644,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3818,7 +3855,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
}
/* use legacy palette for Ironlake */
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
palreg = LGC_PALETTE(pipe);
/* Workaround : Do not read or write the pipe palette/gamma data while
@@ -3860,30 +3897,6 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}
-/**
- * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
- * cursor plane briefly if not already running after enabling the display
- * plane.
- * This workaround avoids occasional blank screens when self refresh is
- * enabled.
- */
-static void
-g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- u32 cntl = I915_READ(CURCNTR(pipe));
-
- if ((cntl & CURSOR_MODE) == 0) {
- u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
-
- I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
- I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
- intel_wait_for_vblank(dev_priv->dev, pipe);
- I915_WRITE(CURCNTR(pipe), cntl);
- I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
- I915_WRITE(FW_BLC_SELF, fw_bcl_self);
- }
-}
-
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3892,11 +3905,10 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ drm_vblank_on(dev, pipe);
+
intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
- /* The fixup needs to happen before cursor is enabled */
- if (IS_G4X(dev))
- g4x_fixup_plane(dev_priv, pipe);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
@@ -3904,8 +3916,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * FIXME: Once we grow proper nuclear flip support out of this we need
+ * to compute the mask of flip planes precisely. For the time being
+ * consider this a flip from a NULL plane.
+ */
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
@@ -3917,7 +3935,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
int plane = intel_crtc->plane;
intel_crtc_wait_for_pending_flips(crtc);
- drm_crtc_vblank_off(crtc);
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
@@ -3928,6 +3945,15 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+
+ /*
+ * FIXME: Once we grow proper nuclear flip support out of this we need
+ * to compute the mask of flip planes precisely. For the time being
+ * consider this a flip to a NULL plane.
+ */
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+
+ drm_vblank_off(dev, pipe);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4006,8 +4032,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
cpt_verify_modeset(dev, intel_crtc->pipe);
intel_crtc_enable_planes(crtc);
-
- drm_crtc_vblank_on(crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4059,6 +4083,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
+ if (intel_crtc_to_shared_dpll(intel_crtc))
+ intel_enable_shared_dpll(intel_crtc);
+
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
@@ -4083,16 +4110,15 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
-
- if (intel_crtc->config.has_pch_encoder)
- dev_priv->display.fdi_link_train(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ dev_priv->display.fdi_link_train(crtc);
+ }
+
intel_ddi_enable_pipe_clock(intel_crtc);
ironlake_pfit_enable(intel_crtc);
@@ -4112,6 +4138,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
+ if (intel_crtc->config.dp_encoder_is_mst)
+ intel_ddi_set_vc_payload_alloc(crtc, true);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
intel_opregion_notify_encoder(encoder, true);
@@ -4121,8 +4150,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
intel_crtc_enable_planes(crtc);
-
- drm_crtc_vblank_on(crtc);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -4162,6 +4189,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_disable_pipe(dev_priv, pipe);
+ if (intel_crtc->config.dp_encoder_is_mst)
+ intel_ddi_set_vc_payload_alloc(crtc, false);
+
ironlake_pfit_disable(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4200,7 +4230,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4233,23 +4262,25 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_pipe_clock(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->post_disable)
- encoder->post_disable(encoder);
-
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_ddi_fdi_disable(crtc);
}
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
intel_crtc->active = false;
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
+
+ if (intel_crtc_to_shared_dpll(intel_crtc))
+ intel_disable_shared_dpll(intel_crtc);
}
static void ironlake_crtc_off(struct drm_crtc *crtc)
@@ -4258,10 +4289,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc)
intel_put_shared_dpll(intel_crtc);
}
-static void haswell_crtc_off(struct drm_crtc *crtc)
-{
- intel_ddi_put_crtc_pll(crtc);
-}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
@@ -4287,6 +4314,23 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+static enum intel_display_power_domain port_to_power_domain(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+ case PORT_B:
+ return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+ case PORT_C:
+ return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+ case PORT_D:
+ return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+ default:
+ WARN_ON_ONCE(1);
+ return POWER_DOMAIN_PORT_OTHER;
+ }
+}
+
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
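The for_each_power_domain() macro above visits every set bit in a power-domain mask. The same iteration in stand-alone form, with a made-up domain count:

#include <stdio.h>

#define NUM_DOMAINS 8

#define for_each_set_domain(domain, mask)                      \
	for ((domain) = 0; (domain) < NUM_DOMAINS; (domain)++) \
		if ((1u << (domain)) & (mask))

int main(void)
{
	unsigned long mask = (1u << 1) | (1u << 4) | (1u << 6);
	int domain;

	for_each_set_domain(domain, mask)
		printf("domain %d is in the mask\n", domain);
	return 0;
}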
@@ -4305,19 +4349,10 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- switch (intel_dig_port->port) {
- case PORT_A:
- return POWER_DOMAIN_PORT_DDI_A_4_LANES;
- case PORT_B:
- return POWER_DOMAIN_PORT_DDI_B_4_LANES;
- case PORT_C:
- return POWER_DOMAIN_PORT_DDI_C_4_LANES;
- case PORT_D:
- return POWER_DOMAIN_PORT_DDI_D_4_LANES;
- default:
- WARN_ON_ONCE(1);
- return POWER_DOMAIN_PORT_OTHER;
- }
+ return port_to_power_domain(intel_dig_port->port);
+ case INTEL_OUTPUT_DP_MST:
+ intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+ return port_to_power_domain(intel_dig_port->port);
case INTEL_OUTPUT_ANALOG:
return POWER_DOMAIN_PORT_CRT;
case INTEL_OUTPUT_DSI:
@@ -4333,7 +4368,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
unsigned long mask;
enum transcoder transcoder;
@@ -4341,7 +4375,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (pfit_enabled)
+ if (intel_crtc->config.pch_pfit.enabled ||
+ intel_crtc->config.pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4398,7 +4433,8 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false);
}
-int valleyview_get_vco(struct drm_i915_private *dev_priv)
+/* returns HPLL frequency in kHz */
+static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -4408,7 +4444,23 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv)
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->dpio_lock);
- return vco_freq[hpll_freq];
+ return vco_freq[hpll_freq] * 1000;
+}
+
+static void vlv_update_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz",
+ dev_priv->vlv_cdclk_freq);
+
+ /*
+ * Program the gmbus_freq based on the cdclk frequency.
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+ I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
}
/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -4417,12 +4469,11 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
- WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
- dev_priv->vlv_cdclk_freq = cdclk;
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
- if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+ if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
- else if (cdclk == 266)
+ else if (cdclk == 266667)
cmd = 1;
else
cmd = 0;
@@ -4439,18 +4490,23 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- if (cdclk == 400) {
+ if (cdclk == 400000) {
u32 divider, vco;
vco = valleyview_get_vco(dev_priv);
- divider = ((vco << 1) / cdclk) - 1;
+ divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- val &= ~0xf;
+ val &= ~DISPLAY_FREQUENCY_VALUES;
val |= divider;
vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+ if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+ DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+ 50))
+ DRM_ERROR("timed out waiting for CDclk change\n");
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -4463,54 +4519,43 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
* For high bandwidth configs, we set a higher latency in the bunit
* so that the core display fetch happens in time to avoid underruns.
*/
- if (cdclk == 400)
+ if (cdclk == 400000)
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
mutex_unlock(&dev_priv->dpio_lock);
- /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
- intel_i2c_reset(dev);
-}
-
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
-{
- int cur_cdclk, vco;
- int divider;
-
- vco = valleyview_get_vco(dev_priv);
-
- mutex_lock(&dev_priv->dpio_lock);
- divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- mutex_unlock(&dev_priv->dpio_lock);
-
- divider &= 0xf;
-
- cur_cdclk = (vco << 1) / (divider + 1);
-
- return cur_cdclk;
+ vlv_update_cdclk(dev);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
+ int vco = valleyview_get_vco(dev_priv);
+ int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
- * 320MHz
+ * 320/333MHz (depends on HPLL freq)
* 400MHz
* So we check to see whether we're above 90% of the lower bin and
* adjust if needed.
+ *
+ * We seem to get an unstable or solid color picture at 200MHz.
+ * Not sure what's wrong. For now use 200MHz only when all pipes
+ * are off.
*/
- if (max_pixclk > 288000) {
- return 400;
- } else if (max_pixclk > 240000) {
- return 320;
- } else
- return 266;
- /* Looks like the 200MHz CDclk freq doesn't work on some configs */
+ if (max_pixclk > freq_320*9/10)
+ return 400000;
+ else if (max_pixclk > 266667*9/10)
+ return freq_320;
+ else if (max_pixclk > 0)
+ return 266667;
+ else
+ return 200000;
}
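The new bin selection above keys off 90% of the next-lower cdclk step, and the 320/333 MHz step depends on whether twice the HPLL VCO divides evenly by 320 MHz. A direct transcription of that decision as a user-space sketch, with an assumed 800 MHz HPLL:

#include <stdio.h>

/* Pick a cdclk (kHz) for a given max pixel clock (kHz), mirroring the
 * thresholds above. vco is the HPLL frequency in kHz. */
static int calc_cdclk(int vco, int max_pixclk)
{
	int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;

	if (max_pixclk > freq_320 * 9 / 10)
		return 400000;
	else if (max_pixclk > 266667 * 9 / 10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

int main(void)
{
	int vco = 800000;	/* assumed 800 MHz HPLL */

	printf("%d kHz\n", calc_cdclk(vco, 148500));	/* 1080p pixel clock -> 266667 */
	printf("%d kHz\n", calc_cdclk(vco, 0));		/* all pipes off -> 200000 */
	return 0;
}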
/* compute the max pixel clock for new configuration */
@@ -4633,8 +4678,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc_enable_planes(crtc);
- drm_crtc_vblank_on(crtc);
-
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
@@ -4727,8 +4770,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- drm_crtc_vblank_on(crtc);
-
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
@@ -4768,6 +4809,16 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4776,9 +4827,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
+ * We also need to wait on all gmch platforms because of the
+ * self-refresh mode constraint explained above.
*/
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev, pipe);
intel_disable_pipe(dev_priv, pipe);
@@ -4805,7 +4857,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
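The comment added above spells out an ordering constraint: memory self-refresh has to go off before the planes, and a vblank must pass before the pipe is disabled so the plane disable actually latches. A compressed sketch of that sequence with stubbed steps:

#include <stdio.h>

static void set_memory_self_refresh(int on) { printf("self-refresh %s\n", on ? "on" : "off"); }
static void disable_planes(void)            { printf("planes off\n"); }
static void wait_for_vblank(void)           { printf("wait one vblank\n"); }
static void disable_pipe(void)              { printf("pipe off\n"); }

/* Ordering mirrors the disable path above: cxsr off -> planes off -> vblank -> pipe off */
int main(void)
{
	set_memory_self_refresh(0);	/* otherwise the plane disable may not latch */
	disable_planes();
	wait_for_vblank();		/* let the shadow->live plane update happen */
	disable_pipe();
	return 0;
}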
@@ -4843,23 +4894,49 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
}
}
+/* Master function to enable/disable CRTC and corresponding power wells */
+void intel_crtc_control(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum intel_display_power_domain domain;
+ unsigned long domains;
+
+ if (enable) {
+ if (!intel_crtc->active) {
+ domains = get_crtc_power_domains(crtc);
+ for_each_power_domain(domain, domains)
+ intel_display_power_get(dev_priv, domain);
+ intel_crtc->enabled_power_domains = domains;
+
+ dev_priv->display.crtc_enable(crtc);
+ }
+ } else {
+ if (intel_crtc->active) {
+ dev_priv->display.crtc_disable(crtc);
+
+ domains = intel_crtc->enabled_power_domains;
+ for_each_power_domain(domain, domains)
+ intel_display_power_put(dev_priv, domain);
+ intel_crtc->enabled_power_domains = 0;
+ }
+ }
+}
+
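intel_crtc_control() above brackets the CRTC enable with power-domain gets and the disable with the matching puts, remembering which domains were taken. A stand-alone sketch of that balanced get/put bookkeeping (domain numbering is invented):

#include <stdio.h>

#define NUM_DOMAINS 8

static int domain_refs[NUM_DOMAINS];

static void power_get(int domain) { domain_refs[domain]++; }
static void power_put(int domain) { domain_refs[domain]--; }

struct fake_crtc {
	int active;
	unsigned long enabled_domains;
};

static void crtc_control(struct fake_crtc *crtc, unsigned long needed, int enable)
{
	int d;

	if (enable && !crtc->active) {
		for (d = 0; d < NUM_DOMAINS; d++)
			if (needed & (1ul << d))
				power_get(d);
		crtc->enabled_domains = needed;
		crtc->active = 1;
	} else if (!enable && crtc->active) {
		crtc->active = 0;
		for (d = 0; d < NUM_DOMAINS; d++)
			if (crtc->enabled_domains & (1ul << d))
				power_put(d);
		crtc->enabled_domains = 0;
	}
}

int main(void)
{
	struct fake_crtc crtc = { 0, 0 };

	crtc_control(&crtc, (1ul << 2) | (1ul << 5), 1);
	crtc_control(&crtc, 0, 0);
	printf("refs after enable/disable: %d %d\n", domain_refs[2], domain_refs[5]);
	return 0;
}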
/**
* Sets the power management mode of the pipe and plane.
*/
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
bool enable = false;
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
enable |= intel_encoder->connectors_active;
- if (enable)
- dev_priv->display.crtc_enable(crtc);
- else
- dev_priv->display.crtc_disable(crtc);
+ intel_crtc_control(crtc, enable);
intel_crtc_update_sarea(crtc, enable);
}
@@ -4869,6 +4946,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
@@ -4877,13 +4956,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
- assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
- assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
- assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
-
if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+ i915_gem_track_fb(old_obj, NULL,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = NULL;
}
@@ -4939,24 +5016,31 @@ static void intel_connector_check_state(struct intel_connector *connector)
connector->base.base.id,
connector->base.name);
+ /* there is no real hw state for MST connectors */
+ if (connector->mst_port)
+ return;
+
WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
- WARN(!encoder->connectors_active,
- "encoder->connectors_active not set\n");
- encoder_enabled = encoder->get_hw_state(encoder, &pipe);
- WARN(!encoder_enabled, "encoder not enabled\n");
- if (WARN_ON(!encoder->base.crtc))
- return;
+ if (encoder) {
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
- crtc = encoder->base.crtc;
+ crtc = encoder->base.crtc;
- WARN(!crtc->enabled, "crtc not enabled\n");
- WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
- WARN(pipe != to_intel_crtc(crtc)->pipe,
- "encoder active on the wrong pipe\n");
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
+ }
}
}
@@ -5161,9 +5245,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
- /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
- * clock survives for now. */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ /*
+ * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
+ * old clock survives for now.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
pipe_config->shared_dpll = crtc->config.shared_dpll;
if (pipe_config->has_pch_encoder)
@@ -5174,7 +5260,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
- return 400000; /* FIXME */
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int vco = valleyview_get_vco(dev_priv);
+ u32 val;
+ int divider;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ divider = val & DISPLAY_FREQUENCY_VALUES;
+
+ WARN((val & DISPLAY_FREQUENCY_STATUS) !=
+ (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+ "cdclk change in progress\n");
+
+ return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
}
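The new getter derives the current cdclk as 2*vco / (divider + 1), rounded to the nearest kHz, which is the inverse of the divider programmed by valleyview_set_cdclk(). A quick round-trip check of that arithmetic with an assumed HPLL value:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int vco = 800000;	/* assumed HPLL, kHz */
	int cdclk = 400000;	/* target, kHz */

	/* set path: divider = round(2*vco / cdclk) - 1 */
	int divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;

	/* get path: cdclk = round(2*vco / (divider + 1)) */
	int readback = DIV_ROUND_CLOSEST(vco << 1, divider + 1);

	printf("divider=%d, cdclk readback=%d kHz\n", divider, readback);
	return 0;
}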
static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -6060,6 +6161,10 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
u32 mdiv;
int refclk = 100000;
+ /* In case of MIPI DPLL will not even be used */
+ if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
+ return;
+
mutex_lock(&dev_priv->dpio_lock);
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
mutex_unlock(&dev_priv->dpio_lock);
@@ -6125,8 +6230,8 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
- plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height, PAGE_SIZE);
+ plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
@@ -7145,8 +7250,8 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
- plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height, PAGE_SIZE);
+ plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
@@ -7163,6 +7268,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -7237,7 +7346,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
@@ -7245,14 +7353,15 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
- WARN(plls->spll_refcount, "SPLL enabled\n");
- WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
- WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+ WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+ WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+ WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
- "CPU PWM2 enabled\n");
+ if (IS_HASWELL(dev))
+ WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
@@ -7265,7 +7374,17 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
- WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
+ WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (IS_HASWELL(dev))
+ return I915_READ(D_COMP_HSW);
+ else
+ return I915_READ(D_COMP_BDW);
}
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
@@ -7276,12 +7395,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
- DRM_ERROR("Failed to disable D_COMP\n");
+ DRM_ERROR("Failed to write to D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
- I915_WRITE(D_COMP, val);
+ I915_WRITE(D_COMP_BDW, val);
+ POSTING_READ(D_COMP_BDW);
}
- POSTING_READ(D_COMP);
}
/*
@@ -7319,12 +7438,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
DRM_ERROR("LCPLL still locked\n");
- val = I915_READ(D_COMP);
+ val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
ndelay(100);
- if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
+ 1))
DRM_ERROR("D_COMP RCOMP still in progress\n");
if (allow_power_down) {
@@ -7373,7 +7493,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
POSTING_READ(LCPLL_CTL);
}
- val = I915_READ(D_COMP);
+ val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
@@ -7479,13 +7599,59 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
- intel_ddi_pll_enable(intel_crtc);
intel_crtc->lowfreq_avail = false;
return 0;
}
+static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll;
+ enum port port;
+ uint32_t tmp;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+ port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+
+ pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+ switch (pipe_config->ddi_pll_sel) {
+ case PORT_CLK_SEL_WRPLL1:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+ break;
+ }
+
+ if (pipe_config->shared_dpll >= 0) {
+ pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+ WARN_ON(!pll->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
+ }
+
+ /*
+	 * Haswell has only FDI/PCH transcoder A, which is connected to
+	 * DDI E. So just check whether this pipe is wired to DDI E and whether
+ * the PCH transcoder is on.
+ */
+ if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ pipe_config->has_pch_encoder = true;
+
+ tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+ pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ }
+}
+
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -7531,22 +7697,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
return false;
- /*
- * Haswell has only FDI/PCH transcoder A. It is which is connected to
- * DDI E. So just check whether this pipe is wired to DDI E and whether
- * the PCH transcoder is on.
- */
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
- if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
- I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
- pipe_config->has_pch_encoder = true;
-
- tmp = I915_READ(FDI_RX_CTL(PIPE_A));
- pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
- FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
- }
+ haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
@@ -7991,8 +8142,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int x = intel_crtc->cursor_x;
- int y = intel_crtc->cursor_y;
+ int x = crtc->cursor_x;
+ int y = crtc->cursor_y;
u32 base = 0, pos = 0;
if (on)
@@ -8036,21 +8187,27 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
intel_crtc->cursor_base = base;
}
-static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file,
- uint32_t handle,
- uint32_t width, uint32_t height)
+/*
+ * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
+ *
+ * Note that the object's reference will be consumed if the update fails. If
+ * the update succeeds, the reference of the old object (if any) will be
+ * consumed.
+ */
+static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
+ struct drm_i915_gem_object *obj,
+ uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj;
+ enum pipe pipe = intel_crtc->pipe;
unsigned old_width;
uint32_t addr;
int ret;
/* if we want to turn off the cursor ignore width and height */
- if (!handle) {
+ if (!obj) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
obj = NULL;
@@ -8066,12 +8223,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
- if (&obj->base == NULL)
- return -ENOENT;
-
if (obj->base.size < width * height * 4) {
- DRM_DEBUG_KMS("buffer is to small\n");
+ DRM_DEBUG_KMS("buffer is too small\n");
ret = -ENOMEM;
goto fail;
}
@@ -8126,9 +8279,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (intel_crtc->cursor_bo) {
if (!INTEL_INFO(dev)->cursor_needs_physical)
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
- drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
+ i915_gem_track_fb(intel_crtc->cursor_bo, obj,
+ INTEL_FRONTBUFFER_CURSOR(pipe));
mutex_unlock(&dev->struct_mutex);
old_width = intel_crtc->cursor_width;
@@ -8144,6 +8298,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
}
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+
return 0;
fail_unpin:
i915_gem_object_unpin_from_display_plane(obj);
@@ -8154,19 +8310,6 @@ fail:
return ret;
}
-static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
- intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
-
- if (intel_crtc->active)
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
- return 0;
-}
-
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
@@ -8242,7 +8385,7 @@ static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
- return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
+ return PAGE_ALIGN(pitch * mode->vdisplay);
}
static struct drm_framebuffer *
@@ -8667,16 +8810,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void intel_increase_pllclock(struct drm_crtc *crtc)
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
int dpll;
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -8704,7 +8845,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -8773,28 +8914,179 @@ out:
intel_runtime_pm_put(dev_priv);
}
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+ unsigned frontbuffer_bits,
+ struct intel_engine_cs *ring)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_crtc *crtc;
+ enum pipe pipe;
if (!i915.powersave)
return;
- for_each_crtc(dev, crtc) {
- if (!crtc->primary->fb)
- continue;
-
- if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
+ for_each_pipe(pipe) {
+ if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
- intel_increase_pllclock(crtc);
+ intel_increase_pllclock(dev, pipe);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ if (ring) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.busy_bits
+ |= obj->frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits
+ &= ~obj->frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+ intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Delay flushing when rings are still busy. */
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+ intel_edp_psr_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned frontbuffer_bits;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ frontbuffer_bits = obj->frontbuffer_bits;
+
+ if (retire) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. The
+ * actual frontbuffer flushing will be delayed until completion is signalled
+ * with intel_frontbuffer_flip_complete. If an invalidate happens in between,
+ * this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.flip_bits
+ |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Mask any cancelled flips. */
+ frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+ dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
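/*
 * A minimal sketch (not part of this patch) of how the frontbuffer tracking
 * helpers above are meant to be sequenced for a single primary plane; the
 * caller and its name are hypothetical, only the helpers and
 * INTEL_FRONTBUFFER_PRIMARY() come from this series.
 */
static void example_frontbuffer_tracking(struct drm_device *dev,
					 struct drm_i915_gem_object *obj,
					 struct intel_engine_cs *ring,
					 enum pipe pipe)
{
	/* Rendering into the frontbuffer starts: PSR/FBC/DRRS must back off.
	 * With a non-NULL ring the eventual flush is parked in
	 * fb_tracking.busy_bits. Caller holds struct_mutex. */
	intel_fb_obj_invalidate(obj, ring);

	/* A flip was scheduled: defer the flush until the flip latches. */
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	/* From the unpin worker, once the flip completed on a vblank. */
	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	/* Request retirement: clears busy_bits and unblocks delayed flushes.
	 * Caller holds struct_mutex. */
	intel_fb_obj_flush(obj, true);
}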
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -8812,8 +9104,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(work);
}
- intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
-
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
@@ -8824,6 +9114,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
+ enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
@@ -8833,6 +9124,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
+ intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
@@ -9202,6 +9495,150 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
return 0;
}
+static bool use_mmio_flip(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *obj)
+{
+ /*
+ * This is not being used for older platforms, because the lack of a
+ * flip done interrupt there forces us to use CS flips. Older
+ * platforms derive flip done using some clever
+ * tricks involving the flip_pending status bits and vblank irqs.
+ * So using MMIO flips there would disrupt this mechanism.
+ */
+
+ if (ring == NULL)
+ return true;
+
+ if (INTEL_INFO(ring->dev)->gen < 5)
+ return false;
+
+ if (i915.use_mmio_flip < 0)
+ return false;
+ else if (i915.use_mmio_flip > 0)
+ return true;
+ else
+ return ring != obj->ring;
+}
+
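/*
 * For reference, a compact restatement (illustration only) of the policy
 * use_mmio_flip() encodes, assuming i915.use_mmio_flip keeps the signed
 * convention the code implies:
 *
 *   ring == NULL             ->  MMIO flip (nothing to synchronize against)
 *   gen < 5                  ->  CS flip   (no flip-done interrupt path)
 *   i915.use_mmio_flip < 0   ->  CS flip   (forced off by module param)
 *   i915.use_mmio_flip > 0   ->  MMIO flip (forced on by module param)
 *   i915.use_mmio_flip == 0  ->  MMIO flip only when ring != obj->ring
 */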
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb =
+ to_intel_framebuffer(intel_crtc->base.primary->fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ u32 dspcntr;
+ u32 reg;
+
+ intel_mark_page_flip_active(intel_crtc);
+
+ reg = DSPCNTR(intel_crtc->plane);
+ dspcntr = I915_READ(reg);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+ I915_WRITE(reg, dspcntr);
+
+ I915_WRITE(DSPSURF(intel_crtc->plane),
+ intel_crtc->unpin_work->gtt_offset);
+ POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
+static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *ring;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ if (!obj->last_write_seqno)
+ return 0;
+
+ ring = obj->ring;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true),
+ obj->last_write_seqno))
+ return 0;
+
+ ret = i915_gem_check_olr(ring, obj->last_write_seqno);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!ring->irq_get(ring)))
+ return 0;
+
+ return 1;
+}
+
+void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct intel_crtc *intel_crtc;
+ unsigned long irq_flags;
+ u32 seqno;
+
+ seqno = ring->get_seqno(ring, false);
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ for_each_intel_crtc(ring->dev, intel_crtc) {
+ struct intel_mmio_flip *mmio_flip;
+
+ mmio_flip = &intel_crtc->mmio_flip;
+ if (mmio_flip->seqno == 0)
+ continue;
+
+ if (ring->id != mmio_flip->ring_id)
+ continue;
+
+ if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
+ intel_do_mmio_flip(intel_crtc);
+ mmio_flip->seqno = 0;
+ ring->irq_put(ring);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+}
+
+static int intel_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long irq_flags;
+ int ret;
+
+ if (WARN_ON(intel_crtc->mmio_flip.seqno))
+ return -EBUSY;
+
+ ret = intel_postpone_flip(obj);
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+ intel_do_mmio_flip(intel_crtc);
+ return 0;
+ }
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+ intel_crtc->mmio_flip.ring_id = obj->ring->id;
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+
+ /*
+ * Double check to catch cases where irq fired before
+ * mmio flip data was ready
+ */
+ intel_notify_mmio_flip(obj->ring);
+ return 0;
+}
+
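/*
 * In outline, the MMIO flip handoff above works as follows (a restatement of
 * the control flow, not new behaviour):
 *
 *   intel_queue_mmio_flip()
 *     -> intel_postpone_flip(): 0 = render already idle, flip now via
 *        intel_do_mmio_flip(); 1 = grab the ring irq and park
 *        last_write_seqno in intel_crtc->mmio_flip under mmio_flip_lock
 *   ring user interrupt
 *     -> intel_notify_mmio_flip(): for every crtc whose parked seqno has
 *        passed, do the MMIO flip, clear the seqno and drop the irq ref
 */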
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -9220,13 +9657,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
unsigned long flags;
int ret;
+ /*
+ * drm_mode_page_flip_ioctl() should already catch this, but double
+ * check to be safe. In the future we may enable pageflipping from
+ * a disabled primary plane.
+ */
+ if (WARN_ON(intel_fb_obj(old_fb) == NULL))
+ return -EBUSY;
+
/* Can't change pixel format via MI display flips. */
if (fb->pixel_format != crtc->primary->fb->pixel_format)
return -EINVAL;
@@ -9249,7 +9695,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
- work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ work->old_fb_obj = intel_fb_obj(old_fb);
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
@@ -9290,10 +9736,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
- work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+ work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
+ if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+ /* vlv: DISPLAY_FLIP fails to change tiling */
+ ring = NULL;
+ } else if (IS_IVYBRIDGE(dev)) {
+ ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
if (ring == NULL || ring->id != RCS)
@@ -9309,12 +9760,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->gtt_offset =
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
+ if (use_mmio_flip(ring, obj))
+ ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
+ else
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
if (ret)
goto cleanup_unpin;
+ i915_gem_track_fb(work->old_fb_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
+
intel_disable_fbc(dev);
- intel_mark_fb_busy(obj, NULL);
+ intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -9344,7 +9803,7 @@ out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event)
- drm_send_vblank_event(dev, intel_crtc->pipe, event);
+ drm_send_vblank_event(dev, pipe, event);
}
return ret;
}
@@ -10017,11 +10476,14 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(double_wide);
+ PIPE_CONF_CHECK_X(ddi_pll_sel);
+
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10083,6 +10545,14 @@ check_encoder_state(struct drm_device *dev)
if (connector->base.dpms != DRM_MODE_DPMS_OFF)
active = true;
}
+ /*
+ * For MST connectors, if we unplug, the connector goes
+ * away but the encoder is still connected to a crtc
+ * until a modeset happens in response to the hotplug.
+ */
+ if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
+ continue;
+
WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
@@ -10378,20 +10848,23 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct drm_framebuffer *old_fb;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
+ obj,
NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
mutex_unlock(&dev->struct_mutex);
goto done;
}
- old_fb = crtc->primary->fb;
if (old_fb)
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = fb;
@@ -10563,12 +11036,17 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
} else if (set->crtc->primary->fb != set->fb) {
- /* If we have no fb then treat it as a full mode set */
+ /*
+ * If we have no fb, we can only flip as long as the crtc is
+ * active, otherwise we need a full mode set. The crtc may
+ * be active if we've only disabled the primary plane, or
+ * in fastboot situations.
+ */
if (set->crtc->primary->fb == NULL) {
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
- if (intel_crtc->active && i915.fastboot) {
+ if (intel_crtc->active) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
@@ -10620,7 +11098,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == &connector->base) {
- connector->new_encoder = connector->encoder;
+ connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
break;
}
}
@@ -10666,7 +11144,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
new_crtc)) {
return -EINVAL;
}
- connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+ connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
@@ -10700,7 +11178,12 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
-
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder)
+ if (connector->new_encoder != connector->encoder)
+ connector->encoder = connector->new_encoder;
+ }
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
@@ -10806,10 +11289,24 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
} else if (config->fb_changed) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+
intel_crtc_wait_for_pending_flips(set->crtc);
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
+
+ /*
+ * We need to make sure the primary plane is re-enabled if it
+ * has previously been turned off.
+ */
+ if (!intel_crtc->primary_enabled && ret == 0) {
+ WARN_ON(!intel_crtc->active);
+ intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+ intel_crtc->pipe);
+ }
+
/*
* In the fastboot case this may be our only check of the
* state after boot. It would be better to only do it on
@@ -10850,26 +11347,21 @@ out_config:
}
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .cursor_set = intel_crtc_cursor_set,
- .cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
.set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
-static void intel_cpu_pll_init(struct drm_device *dev)
-{
- if (HAS_DDI(dev))
- intel_ddi_pll_init(dev);
-}
-
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
val = I915_READ(PCH_DPLL(pll->id));
hw_state->dpll = val;
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
@@ -10951,7 +11443,9 @@ static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (HAS_DDI(dev))
+ intel_ddi_pll_init(dev);
+ else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ibx_pch_dpll_init(dev);
else
dev_priv->num_shared_dpll = 0;
@@ -10959,17 +11453,328 @@ static void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
+static int
+intel_primary_plane_disable(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc;
+
+ if (!plane->fb)
+ return 0;
+
+ BUG_ON(!plane->crtc);
+
+ intel_crtc = to_intel_crtc(plane->crtc);
+
+ /*
+ * Even though we checked plane->fb above, it's still possible that
+ * the primary plane has been implicitly disabled because the crtc
+ * coordinates given weren't visible, or because we detected
+ * that it was 100% covered by a sprite plane. Or, the CRTC may be
+ * off and we've set a fb, but haven't actually turned on the CRTC yet.
+ * In any of these cases, we need to unpin the FB and let the fb pointer get
+ * updated, but otherwise we don't need to touch the hardware.
+ */
+ if (!intel_crtc->primary_enabled)
+ goto disable_unpin;
+
+ intel_crtc_wait_for_pending_flips(plane->crtc);
+ intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
+ intel_plane->pipe);
+disable_unpin:
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+ intel_unpin_fb_obj(intel_fb_obj(plane->fb));
+ mutex_unlock(&dev->struct_mutex);
+ plane->fb = NULL;
+
+ return 0;
+}
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ struct drm_rect dest = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ /* integer pixels */
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+ };
+ bool visible;
+ int ret;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &visible);
+
+ if (ret)
+ return ret;
+
+ /*
+ * If the CRTC isn't enabled, we're just pinning the framebuffer,
+ * updating the fb pointer, and returning without touching the
+ * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
+ * turn on the display with all planes setup as desired.
+ */
+ if (!crtc->enabled) {
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * If we already called setplane while the crtc was disabled,
+ * we may have an fb pinned; unpin it.
+ */
+ if (plane->fb)
+ intel_unpin_fb_obj(old_obj);
+
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+ /* Pin and return without programming hardware */
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+ }
+
+ intel_crtc_wait_for_pending_flips(crtc);
+
+ /*
+ * If clipping results in a non-visible primary plane, we'll disable
+ * the primary plane. Note that this is a bit different than what
+ * happens if userspace explicitly disables the plane by passing fb=0
+ * because plane->fb still gets set and pinned.
+ */
+ if (!visible) {
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * Try to pin the new fb first so that we can bail out if we
+ * fail.
+ */
+ if (plane->fb != fb) {
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+ if (intel_crtc->primary_enabled)
+ intel_disable_primary_hw_plane(dev_priv,
+ intel_plane->plane,
+ intel_plane->pipe);
+
+
+ if (plane->fb != fb)
+ if (plane->fb)
+ intel_unpin_fb_obj(old_obj);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+
+ ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
+ if (ret)
+ return ret;
+
+ if (!intel_crtc->primary_enabled)
+ intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+ intel_crtc->pipe);
+
+ return 0;
+}
+
+/* Common destruction function for both primary and cursor planes */
+static void intel_plane_destroy(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(intel_plane);
+}
+
+static const struct drm_plane_funcs intel_primary_plane_funcs = {
+ .update_plane = intel_primary_plane_setplane,
+ .disable_plane = intel_primary_plane_disable,
+ .destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
+ int pipe)
+{
+ struct intel_plane *primary;
+ const uint32_t *intel_primary_formats;
+ int num_formats;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (primary == NULL)
+ return NULL;
+
+ primary->can_scale = false;
+ primary->max_downscale = 1;
+ primary->pipe = pipe;
+ primary->plane = pipe;
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
+ primary->plane = !pipe;
+
+ if (INTEL_INFO(dev)->gen <= 3) {
+ intel_primary_formats = intel_primary_formats_gen2;
+ num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
+ } else {
+ intel_primary_formats = intel_primary_formats_gen4;
+ num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
+ }
+
+ drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_primary_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY);
+ return &primary->base;
+}
+
+static int
+intel_cursor_plane_disable(struct drm_plane *plane)
+{
+ if (!plane->fb)
+ return 0;
+
+ BUG_ON(!plane->crtc);
+
+ return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
+}
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_rect dest = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ /* integer pixels */
+ .x2 = intel_crtc->config.pipe_src_w,
+ .y2 = intel_crtc->config.pipe_src_h,
+ };
+ bool visible;
+ int ret;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true, &visible);
+ if (ret)
+ return ret;
+
+ crtc->cursor_x = crtc_x;
+ crtc->cursor_y = crtc_y;
+ if (fb != crtc->cursor->fb) {
+ return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
+ } else {
+ intel_crtc_update_cursor(crtc, visible);
+ return 0;
+ }
+}
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_cursor_plane_update,
+ .disable_plane = intel_cursor_plane_disable,
+ .destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
+ int pipe)
+{
+ struct intel_plane *cursor;
+
+ cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
+ if (cursor == NULL)
+ return NULL;
+
+ cursor->can_scale = false;
+ cursor->max_downscale = 1;
+ cursor->pipe = pipe;
+ cursor->plane = pipe;
+
+ drm_universal_plane_init(dev, &cursor->base, 0,
+ &intel_cursor_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ DRM_PLANE_TYPE_CURSOR);
+ return &cursor->base;
+}
+
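/*
 * With the primary and cursor planes registered through
 * drm_universal_plane_init(), a userspace client that sets the
 * universal-planes client cap can drive them through the ordinary plane API.
 * A hypothetical libdrm sketch (fd, plane_id, crtc_id, fb_id, w and h are
 * assumed to have been obtained or created elsewhere):
 */
#include <xf86drm.h>
#include <xf86drmMode.h>

void example_set_primary(int fd, uint32_t plane_id, uint32_t crtc_id,
			 uint32_t fb_id, uint32_t w, uint32_t h)
{
	/* Expose primary/cursor planes to this client. */
	drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);

	/* Full-screen update: crtc rect in pixels, src rect in 16.16. */
	drmModeSetPlane(fd, plane_id, crtc_id, fb_id, 0,
			0, 0, w, h,
			0, 0, w << 16, h << 16);
}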
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- int i;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
+ int i, ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
return;
- drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+ primary = intel_primary_plane_create(dev, pipe);
+ if (!primary)
+ goto fail;
+
+ cursor = intel_cursor_plane_create(dev, pipe);
+ if (!cursor)
+ goto fail;
+
+ ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
+ cursor, &intel_crtc_funcs);
+ if (ret)
+ goto fail;
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
@@ -10980,7 +11785,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/*
* On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
- * is hooked to plane B. Hence we want plane A feeding pipe B.
+ * is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
@@ -11002,6 +11807,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+ return;
+
+fail:
+ if (primary)
+ drm_plane_cleanup(primary);
+ if (cursor)
+ drm_plane_cleanup(cursor);
+ kfree(intel_crtc);
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -11021,21 +11834,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
- struct drm_mode_object *drmmode_obj;
+ struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
- DRM_MODE_OBJECT_CRTC);
+ drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
- if (!drmmode_obj) {
+ if (!drmmode_crtc) {
DRM_ERROR("no such CRTC id\n");
return -ENOENT;
}
- crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_intel_crtc(drmmode_crtc);
pipe_from_crtc_id->pipe = crtc->pipe;
return 0;
@@ -11236,6 +12048,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
+ intel_edp_psr_init(dev);
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
@@ -11249,11 +12063,14 @@ static void intel_setup_outputs(struct drm_device *dev)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
+ struct drm_device *dev = fb->dev;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
+ mutex_lock(&dev->struct_mutex);
WARN_ON(!intel_fb->obj->framebuffer_references--);
- drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+ drm_gem_object_unreference(&intel_fb->obj->base);
+ mutex_unlock(&dev->struct_mutex);
kfree(intel_fb);
}
@@ -11438,7 +12255,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -11722,6 +12539,9 @@ void intel_modeset_init_hw(struct drm_device *dev)
{
intel_prepare_ddi(dev);
+ if (IS_VALLEYVIEW(dev))
+ vlv_update_cdclk(dev);
+
intel_init_clock_gating(dev);
intel_reset_dpio(dev);
@@ -11798,7 +12618,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_dpio(dev);
intel_reset_dpio(dev);
- intel_cpu_pll_init(dev);
intel_shared_dpll_init(dev);
/* Just disable it once at startup */
@@ -12024,6 +12843,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->base.base.id,
encoder->base.name);
encoder->disable(encoder);
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
}
encoder->base.crtc = NULL;
encoder->connectors_active = false;
@@ -12108,10 +12929,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active ? "enabled" : "disabled");
}
- /* FIXME: Smash this into the new shared dpll infrastructure. */
- if (HAS_DDI(dev))
- intel_ddi_setup_hw_pll_state(dev);
-
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
@@ -12125,6 +12942,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on);
+
+ if (pll->refcount)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -12242,7 +13062,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_crtc *c;
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
@@ -12259,11 +13079,11 @@ void intel_modeset_gem_init(struct drm_device *dev)
*/
mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
- if (!c->primary->fb)
+ obj = intel_fb_obj(c->primary->fb);
+ if (obj == NULL)
continue;
- fb = to_intel_framebuffer(c->primary->fb);
- if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
+ if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -12278,13 +13098,12 @@ void intel_connector_unregister(struct intel_connector *intel_connector)
struct drm_connector *connector = &intel_connector->base;
intel_panel_destroy_backlight(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
struct drm_connector *connector;
/*
@@ -12294,6 +13113,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
*/
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
+ dev_priv->pm._irqs_disabled = true;
+
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -12304,14 +13125,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_unregister_dsm_handler();
- for_each_crtc(dev, crtc) {
- /* Skip inactive CRTCs */
- if (!crtc->primary->fb)
- continue;
-
- intel_increase_pllclock(crtc);
- }
-
intel_disable_fbc(dev);
intel_disable_gt_powersave(dev);
@@ -12479,7 +13292,7 @@ intel_display_capture_error_state(struct drm_device *dev)
error->pipe[i].source = I915_READ(PIPESRC(i));
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8a1a4fbc06ac..ee3942f0b068 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -114,7 +114,7 @@ static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static int
+int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -773,12 +773,29 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
- sysfs_remove_link(&intel_connector->base.kdev->kobj,
- intel_dp->aux.ddc.dev.kobj.name);
+ if (!intel_connector->mst_port)
+ sysfs_remove_link(&intel_connector->base.kdev->kobj,
+ intel_dp->aux.ddc.dev.kobj.name);
intel_connector_unregister(intel_connector);
}
static void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
+{
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ }
+}
+
+static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
{
@@ -789,8 +806,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
if (IS_G4X(dev)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
- } else if (IS_HASWELL(dev)) {
- /* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
@@ -961,7 +976,10 @@ found:
&pipe_config->dp_m2_n2);
}
- intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+ if (HAS_DDI(dev))
+ hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+ else
+ intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
return true;
}
@@ -1267,6 +1285,19 @@ static void edp_panel_vdd_work(struct work_struct *__work)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
+static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
+{
+ unsigned long delay;
+
+ /*
+ * Queue the timer to fire a long time from now (relative to the power
+ * down delay) to keep the panel power up across a sequence of
+ * operations.
+ */
+ delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
+ schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
+}
+
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
@@ -1276,17 +1307,10 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
intel_dp->want_panel_vdd = false;
- if (sync) {
+ if (sync)
edp_panel_vdd_off_sync(intel_dp);
- } else {
- /*
- * Queue the timer to fire a long
- * time from now (relative to the power down delay)
- * to keep the panel power up across a sequence of operations
- */
- schedule_delayed_work(&intel_dp->panel_vdd_work,
- msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
- }
+ else
+ edp_panel_vdd_schedule_off(intel_dp);
}
void intel_edp_panel_on(struct intel_dp *intel_dp)
@@ -1349,8 +1373,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
- edp_wait_backlight_off(intel_dp);
-
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(intel_dp);
@@ -1386,6 +1408,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("\n");
+
+ intel_panel_enable_backlight(intel_dp->attached_connector);
+
/*
* If we enable the backlight right away following a panel power
* on, we may see slight flicker as the panel syncs with the eDP
@@ -1400,8 +1425,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
-
- intel_panel_enable_backlight(intel_dp->attached_connector);
}
void intel_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1414,8 +1437,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return;
- intel_panel_disable_backlight(intel_dp->attached_connector);
-
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
@@ -1425,6 +1446,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
intel_dp->last_backlight_off = jiffies;
+
+ edp_wait_backlight_off(intel_dp);
+
+ intel_panel_disable_backlight(intel_dp->attached_connector);
}
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1646,11 +1671,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static bool is_edp_psr(struct drm_device *dev)
+static bool is_edp_psr(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return dev_priv->psr.sink_support;
+ return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
@@ -1698,9 +1721,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_vsc_psr psr_vsc;
- if (intel_dp->psr_setup_done)
- return;
-
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
@@ -1712,22 +1732,25 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
- intel_dp->psr_setup_done = true;
}
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
+ bool only_standby = false;
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
/* Enable PSR in sink */
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
@@ -1746,18 +1769,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+ bool only_standby = false;
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
+ val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
@@ -1775,18 +1804,14 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- dev_priv->psr.source_ok = false;
+ lockdep_assert_held(&dev_priv->psr.lock);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
- if (!HAS_PSR(dev)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
- return false;
- }
+ dev_priv->psr.source_ok = false;
- if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
- (dig_port->port != PORT_A)) {
+ if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
@@ -1796,29 +1821,9 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- crtc = dig_port->base.base.crtc;
- if (crtc == NULL) {
- DRM_DEBUG_KMS("crtc not active for PSR\n");
- return false;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc_active(crtc)) {
- DRM_DEBUG_KMS("crtc not active for PSR\n");
- return false;
- }
-
- obj = to_intel_framebuffer(crtc->primary->fb)->obj;
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
- return false;
- }
-
- if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
- return false;
- }
+ /* Below limitations aren't valid for Broadwell */
+ if (IS_BROADWELL(dev))
+ goto out;
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
@@ -1831,35 +1836,60 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ out:
dev_priv->psr.source_ok = true;
return true;
}
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- if (!intel_edp_psr_match_conditions(intel_dp) ||
- intel_edp_is_psr_enabled(dev))
- return;
-
- /* Setup PSR once */
- intel_edp_psr_setup(intel_dp);
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ WARN_ON(dev_priv->psr.active);
+ lockdep_assert_held(&dev_priv->psr.lock);
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
/* Enable PSR on the host */
intel_edp_psr_enable_source(intel_dp);
+
+ dev_priv->psr.active = true;
}
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ return;
+ }
- if (intel_edp_psr_match_conditions(intel_dp) &&
- !intel_edp_is_psr_enabled(dev))
- intel_edp_psr_do_enable(intel_dp);
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR already in use\n");
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+
+ /* Setup PSR once */
+ intel_edp_psr_setup(intel_dp);
+
+ if (intel_edp_psr_match_conditions(intel_dp))
+ dev_priv->psr.enabled = intel_dp;
+ mutex_unlock(&dev_priv->psr.lock);
}
void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1867,36 +1897,136 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!intel_edp_is_psr_enabled(dev))
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
return;
+ }
+
+ if (dev_priv->psr.active) {
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
- I915_WRITE(EDP_PSR_CTL(dev),
- I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
- /* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ }
+
+ dev_priv->psr.enabled = NULL;
+ mutex_unlock(&dev_priv->psr.lock);
+
+ cancel_delayed_work_sync(&dev_priv->psr.work);
}
-void intel_edp_psr_update(struct drm_device *dev)
+static void intel_edp_psr_work(struct work_struct *work)
{
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp = NULL;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+ struct intel_dp *intel_dp = dev_priv->psr.enabled;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
- if (encoder->type == INTEL_OUTPUT_EDP) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ mutex_lock(&dev_priv->psr.lock);
+ intel_dp = dev_priv->psr.enabled;
- if (!is_edp_psr(dev))
- return;
+ if (!intel_dp)
+ goto unlock;
- if (!intel_edp_psr_match_conditions(intel_dp))
- intel_edp_psr_disable(intel_dp);
- else
- if (!intel_edp_is_psr_enabled(dev))
- intel_edp_psr_do_enable(intel_dp);
- }
+ /*
+ * The delayed work can race with an invalidate, hence we need to
+ * recheck. Since psr_flush first clears this and then reschedules, we
+ * won't ever miss a flush when bailing out here.
+ */
+ if (dev_priv->psr.busy_frontbuffer_bits)
+ goto unlock;
+
+ intel_edp_psr_do_enable(intel_dp);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+static void intel_edp_psr_do_exit(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->psr.active) {
+ u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+
+ I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+ dev_priv->psr.active = false;
+ }
+
+}
+
+void intel_edp_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ intel_edp_psr_do_exit(dev);
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /*
+ * On Haswell, sprite plane updates don't result in a psr invalidating
+ * signal in the hardware, which means we need to manually fake this in
+ * software for all flushes, not just when we've seen a preceding
+ * invalidation through frontbuffer rendering.
+ */
+ if (IS_HASWELL(dev) &&
+ (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+ intel_edp_psr_do_exit(dev);
+
+ if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->psr.work,
+ msecs_to_jiffies(100));
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+ mutex_init(&dev_priv->psr.lock);
}
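/*
 * A minimal sketch (not part of this patch) of the intended PSR lifecycle for
 * a single eDP panel, using only the entry points added or reworked here; the
 * ordering is illustrative and `pipe` is assumed to be the panel's pipe.
 */
static void example_psr_lifecycle(struct drm_device *dev,
				  struct intel_dp *intel_dp, enum pipe pipe)
{
	intel_edp_psr_init(dev);		/* once at driver init */
	intel_edp_psr_enable(intel_dp);		/* modeset enable path */

	/* Frontbuffer rendering starts: exit PSR, record the busy planes. */
	intel_edp_psr_invalidate(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	/* Rendering/flip done: clear the busy bits; re-entry is deferred by
	 * ~100 ms via psr.work and only happens once no busy bits remain. */
	intel_edp_psr_flush(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	intel_edp_psr_disable(intel_dp);	/* modeset disable path */
}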
static void intel_disable_dp(struct intel_encoder *encoder)
@@ -2152,6 +2282,70 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, dport);
}
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+ /*
+ * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
@@ -2189,18 +2383,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
-/*
- * These are source-specific values; current Intel hardware supports
- * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
- */
-
+/* These are source-specific values. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
+ if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -2216,18 +2406,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROADWELL(dev)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
- } else if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -2699,41 +2878,6 @@ intel_hsw_signal_levels(uint8_t train_set)
}
}
-static uint32_t
-intel_bdw_signal_levels(uint8_t train_set)
-{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
-
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
-
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
-
- case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
-
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
- }
-}
-
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2744,10 +2888,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_BROADWELL(dev)) {
- signal_levels = intel_bdw_signal_levels(train_set);
- mask = DDI_BUF_EMP_MASK;
- } else if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
@@ -3246,6 +3387,33 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
edp_panel_vdd_off(intel_dp, false);
}
+static bool
+intel_dp_probe_mst(struct intel_dp *intel_dp)
+{
+ u8 buf[1];
+
+ if (!intel_dp->can_mst)
+ return false;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
+ return false;
+
+ _edp_panel_vdd_on(intel_dp);
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+ if (buf[0] & DP_MST_CAP) {
+ DRM_DEBUG_KMS("Sink is MST capable\n");
+ intel_dp->is_mst = true;
+ } else {
+ DRM_DEBUG_KMS("Sink is not MST capable\n");
+ intel_dp->is_mst = false;
+ }
+ }
+ edp_panel_vdd_off(intel_dp, false);
+
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ return intel_dp->is_mst;
+}
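intel_dp_probe_mst() above boils down to two checks: the sink reports DPCD revision 1.2 or newer, and it sets the MST capability bit in DP_MSTM_CAP. A minimal sketch of that decision on plain integers (the helper name is made up, and the bit position assumes DP_MST_CAP is bit 0, as in the DRM DP helpers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MST_CAP_BIT	0x01	/* assumed: DP_MST_CAP == bit 0 of DP_MSTM_CAP */

/* Illustrative restatement of the probe above, on raw DPCD bytes. */
static bool sink_supports_mst(uint8_t dpcd_rev, uint8_t mstm_cap)
{
	if (dpcd_rev < 0x12)	/* MST requires DPCD 1.2 or newer */
		return false;
	return (mstm_cap & MST_CAP_BIT) != 0;
}

int main(void)
{
	printf("rev 1.2, cap set:   %d\n", sink_supports_mst(0x12, 0x01));
	printf("rev 1.1, cap set:   %d\n", sink_supports_mst(0x11, 0x01));
	printf("rev 1.2, cap clear: %d\n", sink_supports_mst(0x12, 0x00));
	return 0;
}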
+
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -3283,6 +3451,20 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
sink_irq_vector, 1) == 1;
}
+static bool
+intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ int ret;
+
+ ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_SINK_COUNT_ESI,
+ sink_irq_vector, 14);
+ if (ret != 14)
+ return false;
+
+ return true;
+}
+
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
@@ -3290,6 +3472,63 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
+static int
+intel_dp_check_mst_status(struct intel_dp *intel_dp)
+{
+ bool bret;
+
+ if (intel_dp->is_mst) {
+ u8 esi[16] = { 0 };
+ int ret = 0;
+ int retry;
+ bool handled;
+ bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+go_again:
+ if (bret == true) {
+
+ /* check link status - esi[10] = 0x200c */
+ if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+
+ DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+
+ if (handled) {
+ for (retry = 0; retry < 3; retry++) {
+ int wret;
+ wret = drm_dp_dpcd_write(&intel_dp->aux,
+ DP_SINK_COUNT_ESI+1,
+ &esi[1], 3);
+ if (wret == 3) {
+ break;
+ }
+ }
+
+ bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+ if (bret == true) {
+ DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ goto go_again;
+ }
+ } else
+ ret = 0;
+
+ return ret;
+ } else {
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ /* send a hotplug event */
+ drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
+ }
+ }
+ return -EINVAL;
+}
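The go_again loop above keeps reading the ESI block, retraining if the link dropped, servicing whatever the sink reports, acknowledging it, and re-reading until nothing is pending. Expressed as a while loop with stand-in helpers (the stubs below are illustrative, not the DPCD accessors the driver uses), the control flow is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stub: pretend the sink raises one ESI event and then goes quiet. */
static bool read_esi(uint8_t esi[16])
{
	static int events = 1;

	memset(esi, 0, 16);
	if (events-- > 0)
		esi[1] = 0x01;		/* some pending sink event */
	return true;			/* the AUX read itself succeeded */
}

/* Stub: "handle" the event and report whether anything was pending. */
static bool service_event(const uint8_t esi[16])
{
	return esi[1] != 0;
}

/* Stub: acknowledge the serviced event bits back to the sink. */
static void ack_event(const uint8_t esi[16])
{
	(void)esi;
}

int main(void)
{
	uint8_t esi[16];

	/* Same shape as the go_again loop: poll, service, ack, repeat. */
	while (read_esi(esi) && service_event(esi)) {
		ack_event(esi);
		printf("serviced an ESI event\n");
	}
	return 0;
}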
+
/*
* According to DP spec
* 5.1.2:
@@ -3298,15 +3537,16 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
-
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- /* FIXME: This access isn't protected by any locks. */
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
if (!intel_encoder->connectors_active)
return;
@@ -3518,8 +3758,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
-
- intel_runtime_pm_get(dev_priv);
+ bool ret;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
@@ -3527,6 +3766,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ if (intel_dp->is_mst) {
+ /* MST devices are disconnected from a monitor POV */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ status = connector_status_disconnected;
+ goto out;
+ }
+
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
@@ -3539,6 +3786,16 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp_probe_oui(intel_dp);
+ ret = intel_dp_probe_mst(intel_dp);
+ if (ret) {
+ /* if we are in MST mode then this connector
+ * won't appear connected or have anything with EDID on it */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ status = connector_status_disconnected;
+ goto out;
+ }
+
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
@@ -3555,9 +3812,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
out:
intel_display_power_put(dev_priv, power_domain);
-
- intel_runtime_pm_put(dev_priv);
-
return status;
}
@@ -3734,6 +3988,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
drm_dp_aux_unregister(&intel_dp->aux);
+ intel_dp_mst_encoder_cleanup(intel_dig_port);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -3748,6 +4003,11 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
+static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+{
+ intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder));
+}
+
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
@@ -3763,15 +4023,68 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
+ .reset = intel_dp_encoder_reset,
.destroy = intel_dp_encoder_destroy,
};
-static void
+void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ return;
+}
+
+bool
+intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+ intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+
+ DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
+ long_hpd ? "long" : "short");
+
+ if (long_hpd) {
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ goto mst_fail;
+
+ if (!intel_dp_get_dpcd(intel_dp)) {
+ goto mst_fail;
+ }
+
+ intel_dp_probe_oui(intel_dp);
+
+ if (!intel_dp_probe_mst(intel_dp))
+ goto mst_fail;
- intel_dp_check_link_status(intel_dp);
+ } else {
+ if (intel_dp->is_mst) {
+ ret = intel_dp_check_mst_status(intel_dp);
+ if (ret == -EINVAL)
+ goto mst_fail;
+ }
+
+ if (!intel_dp->is_mst) {
+ /*
+ * we'll check the link status via the normal hot plug path later -
+ * but for short hpds we should check it now
+ */
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ }
+ }
+ return false;
+mst_fail:
+ /* if we were in MST mode and the device is not there, get out of MST mode */
+ if (intel_dp->is_mst) {
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ }
+ return true;
}
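intel_dp_hpd_pulse() above splits the work by pulse length: a long pulse means the physical connection may have changed, so the sink is re-probed (DPCD, OUI, MST capability), while a short pulse is serviced in place, either through the MST ESI path or a plain link-status check under the connection mutex. A compact sketch of that routing decision (the struct and strings below are illustrative, not driver state):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative port state; not the driver's struct intel_dp. */
struct port_state {
	bool connected;		/* long pulse: did something (dis)appear? */
	bool is_mst;		/* short pulse: which path services it */
};

/*
 * Long pulses mean the physical connection changed, so re-read the
 * sink's capabilities; short pulses are serviced in place.
 */
static const char *classify_hpd(const struct port_state *port, bool long_hpd)
{
	if (long_hpd)
		return port->connected ? "re-probe sink (DPCD, OUI, MST)"
				       : "tear down MST, fall back to SST";
	return port->is_mst ? "service MST ESI interrupts"
			    : "check link status, retrain if needed";
}

int main(void)
{
	struct port_state port = { .connected = true, .is_mst = false };

	printf("long:  %s\n", classify_hpd(&port, true));
	printf("short: %s\n", classify_hpd(&port, false));
	return 0;
}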
/* Return which DP Port should be selected for Transcoder DP control */
@@ -3822,7 +4135,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
return false;
}
-static void
+void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -4035,6 +4348,11 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
+ /*
+ * FIXME: This needs proper synchronization with psr state. But it's really
+ * hard to tell without seeing the eventual users of this code.
+ * Check locking and ordering once that lands.
+ */
if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
@@ -4138,6 +4456,32 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
return downclock_mode;
}
+void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
+{
+ struct drm_device *dev = intel_encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp *intel_dp;
+ enum intel_display_power_domain power_domain;
+
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ return;
+
+ intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ if (!edp_have_panel_vdd(intel_dp))
+ return;
+ /*
+ * The VDD bit needs a power domain reference, so if the bit is
+ * already enabled when we boot or resume, grab this reference and
+ * schedule a vdd off, so we don't hold on to the reference
+ * indefinitely.
+ */
+ DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+
+ edp_panel_vdd_schedule_off(intel_dp);
+}
+
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector,
struct edp_power_seq *power_seq)
@@ -4158,13 +4502,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!is_edp(intel_dp))
return true;
- /* The VDD bit needs a power domain reference, so if the bit is already
- * enabled when we boot, grab this reference. */
- if (edp_have_panel_vdd(intel_dp)) {
- enum intel_display_power_domain power_domain;
- power_domain = intel_display_port_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
- }
+ intel_edp_panel_vdd_sanitize(intel_encoder);
/* Cache DPCD and EDID for edp. */
intel_edp_panel_vdd_on(intel_dp);
@@ -4288,7 +4626,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
edp_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -4321,7 +4659,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp, intel_connector);
- intel_dp->psr_setup_done = false;
+ /* init MST on ports that can support it */
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (port == PORT_B || port == PORT_C || port == PORT_D) {
+ intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
+ }
+ }
if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
drm_dp_aux_unregister(&intel_dp->aux);
@@ -4331,7 +4674,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
return false;
}
@@ -4353,6 +4696,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -4379,6 +4723,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
@@ -4408,9 +4753,55 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
+ intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dev_priv->hpd_irq_port[port] = intel_dig_port;
+
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
kfree(intel_connector);
}
}
+
+void intel_dp_mst_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* disable MST */
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port)
+ continue;
+
+ if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+ if (intel_dig_port->dp.is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+ }
+ }
+}
+
+void intel_dp_mst_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port)
+ continue;
+ if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ int ret;
+
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+ if (ret != 0) {
+ intel_dp_check_mst_status(&intel_dig_port->dp);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
new file mode 100644
index 000000000000..d9a7a7865f66
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = encoder->base.dev;
+ int bpp;
+ int lane_count, slots;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct intel_connector *found = NULL, *intel_connector;
+ int mst_pbn;
+
+ pipe_config->dp_encoder_is_mst = true;
+ pipe_config->has_pch_encoder = false;
+ pipe_config->has_dp_encoder = true;
+ bpp = 24;
+ /*
+ * for MST we always configure max link bw - the spec doesn't
+ * seem to suggest we should do otherwise.
+ */
+ lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+ intel_dp->lane_count = lane_count;
+
+ pipe_config->pipe_bpp = 24;
+ pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ if (intel_connector->new_encoder == encoder) {
+ found = intel_connector;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_ERROR("can't find connector\n");
+ return false;
+ }
+
+ mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+
+ pipe_config->pbn = mst_pbn;
+ slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
+
+ intel_link_compute_m_n(bpp, lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ pipe_config->dp_m_n.tu = slots;
+ return true;
+
+}
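The compute_config hook above always picks the maximum link rate and lane count, then converts the mode into a PBN value and asks the topology manager how many VCPI slots it needs. The underlying budget is first-order arithmetic: with 8b/10b channel coding each lane delivers link_rate/10 bytes of payload per second. A worked example (ignoring MST transport-unit packing and blanking overhead; the numbers are only illustrative):

#include <stdio.h>

/*
 * Rough DP bandwidth check: with 8b/10b coding each lane moves
 * link_rate/10 bytes of payload per second.
 */
static double link_payload_mbytes(double link_rate_gbps, int lanes)
{
	return link_rate_gbps * 1000.0 / 10.0 * lanes;	/* MB/s */
}

static double mode_mbytes(double pixel_clock_mhz, int bpp)
{
	return pixel_clock_mhz * bpp / 8.0;		/* MB/s */
}

int main(void)
{
	/* Example: 1920x1080@60 (148.5 MHz) at 24 bpp over 4-lane HBR. */
	double need = mode_mbytes(148.5, 24);
	double have = link_payload_mbytes(2.7, 4);

	printf("mode needs %.1f MB/s, link carries %.1f MB/s\n", need, have);
	return 0;
}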
+
+static void intel_mst_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ int ret;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+ if (ret) {
+ DRM_ERROR("failed to update payload %d\n", ret);
+ }
+}
+
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ /* this can fail */
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+ /* and this can also fail */
+ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+
+ intel_dp->active_mst_links--;
+ intel_mst->port = NULL;
+ if (intel_dp->active_mst_links == 0) {
+ intel_dig_port->base.post_disable(&intel_dig_port->base);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ }
+}
+
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+ uint32_t temp;
+ struct intel_connector *found = NULL, *intel_connector;
+ int slots;
+ struct drm_crtc *crtc = encoder->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ if (intel_connector->new_encoder == encoder) {
+ found = intel_connector;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_ERROR("can't find connector\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ intel_mst->port = found->port;
+
+ if (intel_dp->active_mst_links == 0) {
+ enum port port = intel_ddi_get_encoder_port(encoder);
+
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
+
+ intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+
+ ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
+ intel_mst->port, intel_crtc->config.pbn, &slots);
+ if (ret == false) {
+ DRM_ERROR("failed to allocate vcpi\n");
+ return;
+ }
+
+
+ intel_dp->active_mst_links++;
+ temp = I915_READ(DP_TP_STATUS(port));
+ I915_WRITE(DP_TP_STATUS(port), temp);
+
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+}
+
+static void intel_mst_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
+ 1))
+ DRM_ERROR("Timed out waiting for ACT sent\n");
+
+ ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+
+ ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+}
+
+static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ *pipe = intel_mst->pipe;
+ if (intel_mst->port)
+ return true;
+ return false;
+}
+
+static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ u32 temp, flags = 0;
+
+ pipe_config->has_dp_encoder = true;
+
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (temp & TRANS_DDI_PHSYNC)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (temp & TRANS_DDI_PVSYNC)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ switch (temp & TRANS_DDI_BPC_MASK) {
+ case TRANS_DDI_BPC_6:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case TRANS_DDI_BPC_8:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case TRANS_DDI_BPC_10:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case TRANS_DDI_BPC_12:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+ pipe_config->adjusted_mode.flags |= flags;
+ intel_dp_get_m_n(crtc, pipe_config);
+
+ intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
+}
+
+static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct edid *edid;
+ int ret;
+
+ edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static enum drm_connector_status
+intel_mst_port_dp_detect(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
+}
+
+static enum drm_connector_status
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status;
+ status = intel_mst_port_dp_detect(connector);
+ return status;
+}
+
+static int
+intel_dp_mst_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void
+intel_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_dp_mst_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dp_mst_set_property,
+ .destroy = intel_dp_mst_connector_destroy,
+};
+
+static int intel_dp_mst_get_modes(struct drm_connector *connector)
+{
+ return intel_dp_mst_get_ddc_modes(connector);
+}
+
+static enum drm_mode_status
+intel_dp_mst_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO - validate mode against available PBN for link */
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ return &intel_dp->mst_encoders[0]->base.base;
+}
+
+static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
+ .get_modes = intel_dp_mst_get_modes,
+ .mode_valid = intel_dp_mst_mode_valid,
+ .best_encoder = intel_mst_best_encoder,
+};
+
+static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_mst);
+}
+
+static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
+ .destroy = intel_dp_mst_encoder_destroy,
+};
+
+static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+{
+ if (connector->encoder) {
+ enum pipe pipe;
+ if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+ return false;
+ return true;
+ }
+ return false;
+}
+
+static void intel_connector_add_to_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ int i;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector)
+ return NULL;
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+
+ intel_connector->unregister = intel_connector_unregister;
+ intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->mst_port = intel_dp;
+ intel_connector->port = port;
+
+ for (i = PIPE_A; i <= PIPE_C; i++) {
+ drm_mode_connector_attach_encoder(&intel_connector->base,
+ &intel_dp->mst_encoders[i]->base.base);
+ }
+ intel_dp_add_properties(intel_dp, connector);
+
+ drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_mode_connector_set_path_property(connector, pathprop);
+ drm_reinit_primary_mode_group(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_add_to_fbdev(intel_connector);
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_connector_register(&intel_connector->base);
+ return connector;
+}
+
+static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+ /* need to nuke the connector */
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ intel_connector->unregister(intel_connector);
+
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_remove_from_fbdev(intel_connector);
+ drm_connector_cleanup(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ drm_reinit_primary_mode_group(dev);
+
+ kfree(intel_connector);
+ DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+static struct drm_dp_mst_topology_cbs mst_cbs = {
+ .add_connector = intel_dp_add_mst_connector,
+ .destroy_connector = intel_dp_destroy_mst_connector,
+ .hotplug = intel_dp_mst_hotplug,
+};
+
+static struct intel_dp_mst_encoder *
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
+{
+ struct intel_dp_mst_encoder *intel_mst;
+ struct intel_encoder *intel_encoder;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
+
+ if (!intel_mst)
+ return NULL;
+
+ intel_mst->pipe = pipe;
+ intel_encoder = &intel_mst->base;
+ intel_mst->primary = intel_dig_port;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+ DRM_MODE_ENCODER_DPMST);
+
+ intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->crtc_mask = 0x7;
+ intel_encoder->cloneable = 0;
+
+ intel_encoder->compute_config = intel_dp_mst_compute_config;
+ intel_encoder->disable = intel_mst_disable_dp;
+ intel_encoder->post_disable = intel_mst_post_disable_dp;
+ intel_encoder->pre_enable = intel_mst_pre_enable_dp;
+ intel_encoder->enable = intel_mst_enable_dp;
+ intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
+ intel_encoder->get_config = intel_dp_mst_enc_get_config;
+
+ return intel_mst;
+
+}
+
+static bool
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
+{
+ int i;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ for (i = PIPE_A; i <= PIPE_C; i++)
+ intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+ return true;
+}
+
+int
+intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ int ret;
+
+ intel_dp->can_mst = true;
+ intel_dp->mst_mgr.cbs = &mst_cbs;
+
+ /* create encoders */
+ intel_dp_create_fake_mst_encoders(intel_dig_port);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id);
+ if (ret) {
+ intel_dp->can_mst = false;
+ return ret;
+ }
+ return 0;
+}
+
+void
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ if (!intel_dp->can_mst)
+ return;
+
+ drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
+ /* encoders will get killed by normal cleanup */
+}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f67340ed2c12..4b2664bd5b81 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,7 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
/**
* _wait_for - magic (register) wait macro
@@ -100,6 +100,7 @@
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10
+#define INTEL_OUTPUT_DP_MST 11
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -165,6 +166,7 @@ struct intel_panel {
struct {
bool present;
u32 level;
+ u32 min;
u32 max;
bool enabled;
bool combination_mode; /* gen 2/4 only */
@@ -207,6 +209,10 @@ struct intel_connector {
/* since POLL and HPD connectors may use the same HPD line keep the native
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
+
+ void *port; /* store this opaque as it's illegal to dereference it */
+
+ struct intel_dp *mst_port;
};
typedef struct dpll {
@@ -307,6 +313,9 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
+ /* PORT_CLK_SEL for DDI ports. */
+ uint32_t ddi_pll_sel;
+
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
@@ -338,6 +347,7 @@ struct intel_crtc_config {
u32 pos;
u32 size;
bool enabled;
+ bool force_thru;
} pch_pfit;
/* FDI configuration, only valid if has_pch_encoder is set. */
@@ -347,6 +357,9 @@ struct intel_crtc_config {
bool ips_enabled;
bool double_wide;
+
+ bool dp_encoder_is_mst;
+ int pbn;
};
struct intel_pipe_wm {
@@ -358,6 +371,11 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
+struct intel_mmio_flip {
+ u32 seqno;
+ u32 ring_id;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -384,7 +402,6 @@ struct intel_crtc {
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
- int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_base;
@@ -394,8 +411,6 @@ struct intel_crtc {
struct intel_crtc_config *new_config;
bool new_enabled;
- uint32_t ddi_pll_sel;
-
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
@@ -412,10 +427,12 @@ struct intel_crtc {
wait_queue_head_t vbl_wait;
int scanline_offset;
+ struct intel_mmio_flip mmio_flip;
};
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
+ uint32_t vert_pixels;
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
@@ -428,7 +445,6 @@ struct intel_plane {
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
- u32 lut_r[1024], lut_g[1024], lut_b[1024];
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
@@ -481,6 +497,7 @@ struct cxsr_latency {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
struct intel_hdmi {
u32 hdmi_reg;
@@ -491,6 +508,7 @@ struct intel_hdmi {
bool has_audio;
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
+ enum hdmi_picture_aspect aspect_ratio;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
@@ -499,6 +517,7 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
+struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
/**
@@ -537,12 +556,20 @@ struct intel_dp {
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
- bool psr_setup_done;
+
struct notifier_block edp_notifier;
bool use_tps3;
+ bool can_mst; /* this port supports mst */
+ bool is_mst;
+ int active_mst_links;
+ /* connector directly attached - won't be used for modeset in mst world */
struct intel_connector *attached_connector;
+ /* mst connector list */
+ struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
+ struct drm_dp_mst_topology_mgr mst_mgr;
+
uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
* This function returns the value we have to program the AUX_CTL
@@ -566,6 +593,14 @@ struct intel_digital_port {
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
+ bool (*hpd_pulse)(struct intel_digital_port *, bool);
+};
+
+struct intel_dp_mst_encoder {
+ struct intel_encoder base;
+ enum pipe pipe;
+ struct intel_digital_port *primary;
+ void *port; /* store this opaque as it's illegal to dereference it */
};
static inline int
@@ -652,6 +687,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
return container_of(encoder, struct intel_digital_port, base.base);
}
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+}
+
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
@@ -676,17 +717,26 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+ /*
+ * We only use drm_irq_uninstall() at unload and VT switch, so
+ * this is the only thing we need to check.
+ */
+ return !dev_priv->pm._irqs_disabled;
+}
+
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void i9xx_check_fifo_underruns(struct drm_device *dev);
-
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -705,10 +755,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
bool intel_ddi_pll_select(struct intel_crtc *crtc);
-void intel_ddi_pll_enable(struct intel_crtc *crtc);
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -716,17 +763,46 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_device *dev);
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring);
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+/**
+ * intel_frontbuffer_flip - prepare frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on @obj. This is for
+ * synchronous plane updates which will happen on the next vblank and which will
+ * not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+static inline
+void intel_frontbuffer_flip(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
void intel_connector_dpms(struct drm_connector *, int mode);
@@ -767,12 +843,18 @@ __intel_framebuffer_create(struct drm_device *dev,
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
+void intel_put_shared_dpll(struct intel_crtc *crtc);
+
+/* modesetting asserts */
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -805,7 +887,6 @@ void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
-int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
@@ -826,18 +907,34 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
-void intel_edp_psr_update(struct drm_device *dev);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-
+void intel_edp_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_psr_init(struct drm_device *dev);
+
+int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
+void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
+void intel_dp_mst_suspend(struct drm_device *dev);
+void intel_dp_mst_resume(struct drm_device *dev);
+int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
+/* intel_dp_mst.c */
+int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
-bool intel_dsi_init(struct drm_device *dev);
+void intel_dsi_init(struct drm_device *dev);
/* intel_dvo.c */
@@ -920,8 +1017,8 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
- u32 max);
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+ u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
@@ -940,7 +1037,9 @@ int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
+ uint32_t sprite_width,
+ uint32_t sprite_height,
+ int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
@@ -963,6 +1062,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
+void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
@@ -976,8 +1076,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id, bool enable);
+
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 3fd082933c87..670c29a7b5dd 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -92,6 +92,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ /* DSI uses short packets for sync events, so clear mode flags for DSI */
+ adjusted_mode->flags = 0;
+
if (intel_dsi->dev.dev_ops->mode_fixup)
return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
mode, adjusted_mode);
@@ -152,6 +155,8 @@ static void intel_dsi_enable(struct intel_encoder *encoder)
if (intel_dsi->dev.dev_ops->enable)
intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+ wait_for_dsi_fifo_empty(intel_dsi);
+
/* assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe)) & ~LANE_CONFIGURATION_MASK;
temp = temp | intel_dsi->port_bits;
@@ -177,6 +182,10 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
tmp |= DPLL_REFA_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
+ /* update the hw state for DPLL */
+ intel_crtc->config.dpll_hw_state.dpll = DPLL_INTEGRATED_CLOCK_VLV |
+ DPLL_REFA_CLK_ENABLE_VLV;
+
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
@@ -192,6 +201,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
if (intel_dsi->dev.dev_ops->send_otp_cmds)
intel_dsi->dev.dev_ops->send_otp_cmds(&intel_dsi->dev);
+ wait_for_dsi_fifo_empty(intel_dsi);
+
/* Enable port in pre-enable phase itself because as per hw team
* recommendation, port should be enabled before plane & pipe */
intel_dsi_enable(encoder);
@@ -232,6 +243,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
if (is_vid_mode(intel_dsi)) {
+ wait_for_dsi_fifo_empty(intel_dsi);
+
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(pipe));
I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
@@ -261,6 +274,8 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
* some next enable sequence send turn on packet error is observed */
if (intel_dsi->dev.dev_ops->disable)
intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+
+ wait_for_dsi_fifo_empty(intel_dsi);
}
static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
@@ -351,9 +366,21 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
+ u32 pclk;
DRM_DEBUG_KMS("\n");
- /* XXX: read flags, set to adjusted_mode */
+ /*
+ * DPLL_MD is not used for DSI; reading it only returns some default
+ * value, so report dpll_md = 0.
+ */
+ pipe_config->dpll_hw_state.dpll_md = 0;
+
+ pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
+ if (!pclk)
+ return;
+
+ pipe_config->adjusted_mode.crtc_clock = pclk;
+ pipe_config->port_clock = pclk;
}
static enum drm_mode_status
@@ -658,7 +685,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
};
-bool intel_dsi_init(struct drm_device *dev)
+void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
@@ -674,29 +701,29 @@ bool intel_dsi_init(struct drm_device *dev)
/* There is no detection method for MIPI so rely on VBT */
if (!dev_priv->vbt.has_mipi)
- return false;
+ return;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ } else {
+ DRM_ERROR("Unsupported Mipi device to reg base");
+ return;
+ }
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
- return false;
+ return;
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dsi);
- return false;
+ return;
}
intel_encoder = &intel_dsi->base;
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;
- if (IS_VALLEYVIEW(dev)) {
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else {
- DRM_ERROR("Unsupported Mipi device to reg base");
- return false;
- }
-
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -743,7 +770,7 @@ bool intel_dsi_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
if (!fixed_mode) {
@@ -754,12 +781,10 @@ bool intel_dsi_init(struct drm_device *dev)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
- return true;
+ return;
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
-
- return false;
}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 31db33d3e5cc..fd51867fd0d3 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -132,6 +132,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern struct intel_dsi_dev_ops vbt_generic_dsi_display_ops;
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 933c86305237..7f1430ac8543 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -419,3 +419,19 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
return 0;
}
+
+void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi)
+{
+ struct drm_encoder *encoder = &intel_dsi->base.base;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 mask;
+
+ mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
+ LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
+
+ if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 100))
+ DRM_ERROR("DPI FIFOs are not empty\n");
+}
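wait_for_dsi_fifo_empty() above is the kernel's wait_for() pattern: poll a status register until all four FIFO-empty bits read back set, or time out and complain. A stripped-down user-space sketch of the same poll-until-mask-set shape (the fake register read and the bounded poll count stand in for the real MMIO read and the time-based timeout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub register read: pretend the FIFOs drain after a few polls. */
static uint32_t read_fifo_stat(void)
{
	static int reads;

	return (++reads > 5) ? 0x0f : 0x03;	/* all four bits vs. two */
}

/*
 * Poll until every bit in mask reads back set, or give up after a
 * bounded number of polls (the real wait_for() sleeps between reads
 * and bounds the loop by wall-clock time instead).
 */
static bool wait_for_bits(uint32_t mask, int max_polls)
{
	while (max_polls-- > 0)
		if ((read_fifo_stat() & mask) == mask)
			return true;
	return false;
}

int main(void)
{
	printf("FIFOs %s\n", wait_for_bits(0x0f, 100) ? "drained" : "stuck");
	return 0;
}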
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
index 9a18cbfa5460..46aa1acc00eb 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.h
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.h
@@ -51,6 +51,7 @@ int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen);
int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs);
+void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi);
/* XXX: questionable write helpers */
static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 21a0d348cedc..47c7584a4aa0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
case MIPI_DSI_DCS_LONG_WRITE:
dsi_vc_dcs_write(intel_dsi, vc, data, len);
break;
- };
+ }
data += len;
@@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
- intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ intel_dsi->video_frmt_cfg_bits =
+ mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
switch (intel_dsi->escape_clk_div) {
case 0:
@@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
*
* prepare count
*/
- ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
/* exit zero count */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index ba79ec19da3b..d8bb1ea2f0da 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -298,3 +298,84 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
}
+
+static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
+{
+ int bpp;
+
+ switch (pixel_format) {
+ default:
+ case VID_MODE_FORMAT_RGB888:
+ case VID_MODE_FORMAT_RGB666_LOOSE:
+ bpp = 24;
+ break;
+ case VID_MODE_FORMAT_RGB666:
+ bpp = 18;
+ break;
+ case VID_MODE_FORMAT_RGB565:
+ bpp = 16;
+ break;
+ }
+
+ WARN(bpp != pipe_bpp,
+ "bpp match assertion failure (expected %d, current %d)\n",
+ bpp, pipe_bpp);
+}
+
+u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 dsi_clock, pclk;
+ u32 pll_ctl, pll_div;
+ u32 m = 0, p = 0;
+ int refclk = 25000;
+ int i;
+
+ DRM_DEBUG_KMS("\n");
+
+ mutex_lock(&dev_priv->dpio_lock);
+ pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+ pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ /* mask out other bits and extract the P1 divisor */
+ pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
+ pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
+
+ /* mask out the other bits and extract the M1 divisor */
+ pll_div &= DSI_PLL_M1_DIV_MASK;
+ pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
+
+ while (pll_ctl) {
+ pll_ctl = pll_ctl >> 1;
+ p++;
+ }
+ p--;
+
+ if (!p) {
+ DRM_ERROR("wrong P1 divisor\n");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lfsr_converts); i++) {
+ if (lfsr_converts[i] == pll_div)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(lfsr_converts)) {
+ DRM_ERROR("wrong m_seed programmed\n");
+ return 0;
+ }
+
+ m = i + 62;
+
+ dsi_clock = (m * refclk) / p;
+
+ /* pixel_format and pipe_bpp should agree */
+ assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
+
+ pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);
+
+ return pclk;
+}
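The end of vlv_get_dsi_pclk() is plain arithmetic: the recovered m and p divisors give the DSI bit clock from the 25 MHz reference, and the pixel clock follows from the lane count and bits per pixel. A worked example with illustrative divisor values (m and p here are made-up but in-range numbers, not values read from real hardware):

#include <stdio.h>

/* The last two steps of the function above, on example numbers. */
int main(void)
{
	int refclk = 25000;		/* kHz, as in the code above */
	int m = 90, p = 3;		/* illustrative divisors */
	int lane_count = 4;
	int pipe_bpp = 24;

	int dsi_clock = (m * refclk) / p;	/* 750000 kHz */
	/* rounding mirrors DIV_ROUND_CLOSEST() */
	int pclk = (dsi_clock * lane_count + pipe_bpp / 2) / pipe_bpp;

	printf("dsi_clock = %d kHz, pclk = %d kHz\n", dsi_clock, pclk);
	return 0;
}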
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a3631c0a5c28..56b47d2ffaf7 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
}
@@ -558,7 +566,7 @@ void intel_dvo_init(struct drm_device *dev)
intel_dvo->panel_wants_dither = true;
}
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 088fe9378a4c..f475414671d8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -43,10 +43,36 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static int intel_fbdev_set_par(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_set_par(info);
+
+ if (ret == 0) {
+ /*
+ * FIXME: fbdev presumes that all callbacks also work from
+ * atomic contexts and relies on that for emergency oops
+ * printing. KMS totally doesn't do that and the locking here is
+ * by far not the only place this goes wrong. Ignore this for
+ * now until we solve this for real.
+ */
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
+ true);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
+
+ return ret;
+}
+
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ .fb_set_par = intel_fbdev_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -81,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
+ size = PAGE_ALIGN(size);
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
@@ -417,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
+ DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
@@ -452,7 +478,7 @@ out:
return true;
}
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.initial_config = intel_fb_initial_config,
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
@@ -623,7 +649,8 @@ int intel_fbdev_init(struct drm_device *dev)
if (ifbdev == NULL)
return -ENOMEM;
- ifbdev->helper.funcs = &intel_fb_helper_funcs;
+ drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index eee2bbec2958..f9151f6641d9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -367,6 +367,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe frame;
int ret;
+ /* Set user selected PAR to incoming mode's member */
+ adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
@@ -879,7 +882,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
struct intel_encoder *encoder;
int count = 0, count_hdmi = 0;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
@@ -1124,6 +1127,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == connector->dev->mode_config.aspect_ratio_property) {
+ switch (val) {
+ case DRM_MODE_PICTURE_ASPECT_NONE:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+ break;
+ case DRM_MODE_PICTURE_ASPECT_4_3:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+ break;
+ case DRM_MODE_PICTURE_ASPECT_16_9:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ goto done;
+ }
+
return -EINVAL;
done:
@@ -1229,6 +1249,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
}
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+ /*
+ * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1416,11 +1500,22 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
};
static void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_aspect_ratio_property(connector->dev))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.aspect_ratio_property,
+ DRM_MODE_PICTURE_ASPECT_NONE);
+}
+
+static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_hdmi->color_range_auto = true;
+ intel_attach_aspect_ratio_property(connector);
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1467,7 +1562,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
- } else if (!HAS_PCH_SPLIT(dev)) {
+ } else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (HAS_DDI(dev)) {
@@ -1490,7 +1585,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi_add_properties(intel_hdmi, connector);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
@@ -1528,6 +1623,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d33b61d0dd33..b31088a551f2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,11 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-enum disp_clk {
- CDCLK,
- CZCLK
-};
-
struct gmbus_port {
const char *name;
int reg;
@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
}
-static int get_disp_clk_div(struct drm_i915_private *dev_priv,
- enum disp_clk clk)
-{
- u32 reg_val;
- int clk_ratio;
-
- reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
-
- if (clk == CDCLK)
- clk_ratio =
- ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
- else
- clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
-
- return clk_ratio;
-}
-
-static void gmbus_set_freq(struct drm_i915_private *dev_priv)
-{
- int vco, gmbus_freq = 0, cdclk_div;
-
- BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
-
- vco = valleyview_get_vco(dev_priv);
-
- /* Get the CDCLK divide ratio */
- cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
-
- /*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
- */
- if (cdclk_div)
- gmbus_freq = (vco << 1) / cdclk_div;
-
- if (WARN_ON(gmbus_freq == 0))
- return;
-
- I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
-}
-
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * In BIOS-less system, program the correct gmbus frequency
- * before reading edid.
- */
- if (IS_VALLEYVIEW(dev))
- gmbus_set_freq(dev_priv);
-
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 5e5a72fca5fb..881361c0f27e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,6 +51,7 @@ struct intel_lvds_encoder {
bool is_dual_link;
u32 reg;
+ u32 a3_power;
struct intel_lvds_connector *attached_connector;
};
@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ enum intel_display_power_domain power_domain;
u32 tmp;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
@@ -172,8 +178,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
+ * panels behave in the two modes. For now, let's just maintain the
+ * value we got from the BIOS.
*/
+ temp &= ~LVDS_A3_POWER_MASK;
+ temp |= lvds_encoder->a3_power;
/* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
@@ -271,7 +280,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
@@ -286,8 +294,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return false;
}
- if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
- LVDS_A3_POWER_UP)
+ if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
lvds_bpp = 8*3;
else
lvds_bpp = 6*3;
@@ -1088,6 +1095,9 @@ out:
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
+ lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
+ LVDS_A3_POWER_MASK;
+
/*
* Unlock registers and just
* leave them unlocked
@@ -1104,7 +1114,7 @@ out:
DRM_DEBUG_KMS("lid notifier registration failed\n");
lvds_connector->lid_notifier.notifier_call = NULL;
}
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4f6b53998d79..ca52ad2ae7d1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -352,6 +352,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_DP_MST:
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
break;
case INTEL_OUTPUT_EDP:
@@ -427,7 +428,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
- intel_panel_set_backlight(intel_connector, bclp, 255);
+ intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index daa118978eec..dc2f4f26c961 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
}
intel_overlay_release_old_vid_tail(overlay);
+
+
+ i915_gem_track_fb(overlay->old_vid_bo, NULL,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
return 0;
}
@@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
bool scale_changed = false;
struct drm_device *dev = overlay->dev;
u32 swidth, swidthsw, sheight, ostride;
+ enum pipe pipe = overlay->crtc->pipe;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
oconfig |= OCONF_CSC_MODE_BT709;
- oconfig |= overlay->crtc->pipe == 0 ?
+ oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
@@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
+ i915_gem_track_fb(overlay->vid_bo, new_bo,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = new_bo;
+ intel_frontbuffer_flip(dev,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
return 0;
out_unpin:
@@ -1028,7 +1039,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_intel_overlay_put_image *put_image_rec = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct drm_mode_object *drmmode_obj;
+ struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
@@ -1057,13 +1068,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
if (!params)
return -ENOMEM;
- drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!drmmode_obj) {
+ drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id);
+ if (!drmmode_crtc) {
ret = -ENOENT;
goto out_free;
}
- crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_intel_crtc(drmmode_crtc);
new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle));
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 12b02fe1d0ae..59b028f0b1e8 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -398,6 +398,69 @@ intel_panel_detect(struct drm_device *dev)
}
}
+/**
+ * scale - scale values from one range to another
+ *
+ * @source_val: value in range [@source_min..@source_max]
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static uint32_t scale(uint32_t source_val,
+ uint32_t source_min, uint32_t source_max,
+ uint32_t target_min, uint32_t target_max)
+{
+ uint64_t target_val;
+
+ WARN_ON(source_min > source_max);
+ WARN_ON(target_min > target_max);
+
+ /* defensive */
+ source_val = clamp(source_val, source_min, source_max);
+
+ /* avoid overflows */
+ target_val = (uint64_t)(source_val - source_min) *
+ (target_max - target_min);
+ do_div(target_val, source_max - source_min);
+ target_val += target_min;
+
+ return target_val;
+}
+
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static inline u32 scale_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(user_level, 0, user_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max]. */
+static inline u32 clamp_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+ hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+ return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static inline u32 scale_hw_to_user(struct intel_connector *connector,
+ u32 hw_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(hw_level, panel->backlight.min, panel->backlight.max,
+ 0, user_max);
+}
+
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
@@ -557,17 +620,16 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
dev_priv->display.set_backlight(connector, level);
}
-/* set backlight brightness to level in range [0..max] */
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
- u32 max)
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
- u32 freq;
+ u32 hw_level;
unsigned long flags;
- u64 n;
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
@@ -576,18 +638,46 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
WARN_ON(panel->backlight.max == 0);
- /* scale to hardware max, but be careful to not overflow */
- freq = panel->backlight.max;
- n = (u64)level * freq;
- do_div(n, max);
- level = n;
+ hw_level = scale_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(connector, hw_level);
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+/* set backlight brightness to level in range [0..max], assuming hw min is
+ * respected.
+ */
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 hw_level;
+ unsigned long flags;
+
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
+ return;
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ WARN_ON(panel->backlight.max == 0);
+
+ hw_level = clamp_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
- panel->backlight.level = level;
if (panel->backlight.device)
- panel->backlight.device->props.brightness = level;
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(connector, level);
+ intel_panel_actually_set_backlight(connector, hw_level);
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
@@ -860,7 +950,9 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
- panel->backlight.level;
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
}
dev_priv->display.enable_backlight(connector);
@@ -889,11 +981,15 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 hw_level;
int ret;
intel_runtime_pm_get(dev_priv);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- ret = intel_panel_get_backlight(connector);
+
+ hw_level = intel_panel_get_backlight(connector);
+ ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+
drm_modeset_unlock(&dev->mode_config.connection_mutex);
intel_runtime_pm_put(dev_priv);
@@ -913,12 +1009,19 @@ static int intel_backlight_device_register(struct intel_connector *connector)
if (WARN_ON(panel->backlight.device))
return -ENODEV;
- BUG_ON(panel->backlight.max == 0);
+ WARN_ON(panel->backlight.max == 0);
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
- props.brightness = panel->backlight.level;
+
+ /*
+ * Note: Everything should work even if the backlight device max
+ * presented to userspace is arbitrarily chosen.
+ */
props.max_brightness = panel->backlight.max;
+ props.brightness = scale_hw_to_user(connector,
+ panel->backlight.level,
+ props.max_brightness);
/*
* Note: using the same name independent of the connector prevents
@@ -964,6 +1067,19 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
* XXX: Query mode clock or hardware clock and program PWM modulation frequency
* appropriately when it's 0. Use VBT and/or sane defaults.
*/
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ /* vbt value is a coefficient in range [0..255] */
+ return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
+ 0, panel->backlight.max);
+}
+
static int bdw_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -979,6 +1095,8 @@ static int bdw_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = bdw_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1003,6 +1121,8 @@ static int pch_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = pch_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1035,6 +1155,8 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1062,6 +1184,8 @@ static int i965_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1099,6 +1223,8 @@ static int vlv_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = _vlv_get_backlight(dev, PIPE_A);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
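For illustration only (not part of the patch): the scale()/scale_user_to_hw()/scale_hw_to_user() helpers added above are a plain linear remap between two ranges, widened to 64 bits before the divide to avoid overflow. A standalone sketch of the same math; the scale_range() name and the example ranges (hw 781..7812, user 0..255) are hypothetical:

#include <stdio.h>

/* linear remap of v from [smin..smax] to [tmin..tmax], widened to 64 bits */
static unsigned int scale_range(unsigned int v,
				unsigned int smin, unsigned int smax,
				unsigned int tmin, unsigned int tmax)
{
	unsigned long long t = (unsigned long long)(v - smin) * (tmax - tmin);

	return tmin + (unsigned int)(t / (smax - smin));
}

int main(void)
{
	/* user -> hw and back; 128/255 lands near the middle of the hw range */
	unsigned int hw = scale_range(128, 0, 255, 781, 7812);   /* 4310 */
	unsigned int user = scale_range(hw, 781, 7812, 0, 255);  /* 127, truncating divide */

	printf("hw=%u user=%u\n", hw, user);
	return 0;
}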
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f1233f544f3e..40c12295c0bd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
+ break;
+ case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
+ break;
+ case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
+ obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config.adjusted_mode;
if (i915.enable_fbc < 0) {
@@ -529,7 +546,10 @@ void intel_update_fbc(struct drm_device *dev)
goto out_disable;
}
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+ max_width = 4096;
+ max_height = 4096;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
@@ -563,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
- if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+ if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+ drm_format_plane_cpp(fb->pixel_format, 0))) {
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
@@ -789,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
return NULL;
}
-static void pineview_disable_cxsr(struct drm_device *dev)
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+ I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ } else if (IS_PINEVIEW(dev)) {
+ val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+ val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+ I915_WRITE(DSPFW3, val);
+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+ _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ I915_WRITE(FW_BLC_SELF, val);
+ } else if (IS_I915GM(dev)) {
+ val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+ _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ I915_WRITE(INSTPM, val);
+ } else {
+ return;
+ }
- /* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("memory self-refresh is %s\n",
+ enable ? "enabled" : "disabled");
}
/*
@@ -864,95 +906,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_HPLLOFF_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE,
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
- G4X_FIFO_SIZE,
- G4X_MAX_WM,
- G4X_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = G4X_FIFO_SIZE,
+ .max_wm = G4X_MAX_WM,
+ .default_wm = G4X_MAX_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
- VALLEYVIEW_FIFO_SIZE,
- VALLEYVIEW_MAX_WM,
- VALLEYVIEW_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = VALLEYVIEW_FIFO_SIZE,
+ .max_wm = VALLEYVIEW_MAX_WM,
+ .default_wm = VALLEYVIEW_MAX_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
- I965_CURSOR_FIFO,
- VALLEYVIEW_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- I915_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
- I945_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
+ .fifo_size = I945_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
- I915_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
+ .fifo_size = I915_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
- I855GM_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
- I830_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
+ .fifo_size = I830_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
@@ -1033,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
+ intel_set_memory_cxsr(dev_priv, false);
return;
}
@@ -1084,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
- /* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
- DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ intel_set_memory_cxsr(dev_priv, true);
} else {
- pineview_disable_cxsr(dev);
- DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ intel_set_memory_cxsr(dev_priv, false);
}
}
@@ -1249,15 +1287,14 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
entries = (clock / 1000) * pixel_size;
- *plane_prec_mult = (entries > 256) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
- *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
- pixel_size);
+ *plane_prec_mult = (entries > 128) ?
+ DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
+ *plane_dl = (64 * (*plane_prec_mult) * 4) / entries;
entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
- *cursor_prec_mult = (entries > 256) ?
- DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
- *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
+ *cursor_prec_mult = (entries > 128) ?
+ DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
+ *cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;
return true;
}
@@ -1282,9 +1319,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
&cursor_prec_mult, &cursora_dl)) {
cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
- DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
+ DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
- DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
+ DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;
I915_WRITE(VLV_DDL1, cursora_prec |
(cursora_dl << DDL_CURSORA_SHIFT) |
@@ -1295,9 +1332,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
&cursor_prec_mult, &cursorb_dl)) {
cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
- DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
+ DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
- DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
+ DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;
I915_WRITE(VLV_DDL2, cursorb_prec |
(cursorb_dl << DDL_CURSORB_SHIFT) |
@@ -1316,6 +1353,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
int plane_sr, cursor_sr;
int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
+ bool cxsr_enabled;
vlv_update_drain_latency(dev);
@@ -1342,10 +1380,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&ignore_plane_sr, &cursor_sr)) {
- I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ cxsr_enabled = true;
} else {
- I915_WRITE(FW_BLC_SELF_VLV,
- I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ cxsr_enabled = false;
+ intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
@@ -1365,6 +1403,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1375,6 +1416,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
+ bool cxsr_enabled;
if (g4x_compute_wm0(dev, PIPE_A,
&g4x_wm_info, latency_ns,
@@ -1394,10 +1436,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
&g4x_wm_info,
&g4x_cursor_wm_info,
&plane_sr, &cursor_sr)) {
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ cxsr_enabled = true;
} else {
- I915_WRITE(FW_BLC_SELF,
- I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ cxsr_enabled = false;
+ intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
@@ -1418,6 +1460,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1427,6 +1472,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
+ bool cxsr_enabled;
/* Calc sr entries for one plane configs */
crtc = single_enabled_crtc(dev);
@@ -1468,13 +1514,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ cxsr_enabled = true;
} else {
+ cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ intel_set_memory_cxsr(dev_priv, false);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1486,6 +1530,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1545,12 +1592,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
if (IS_I915GM(dev) && enabled) {
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
- fb = to_intel_framebuffer(enabled->primary->fb);
+ obj = intel_fb_obj(enabled->primary->fb);
/* self-refresh seems busted with untiled */
- if (fb->obj->tiling_mode == I915_TILING_NONE)
+ if (obj->tiling_mode == I915_TILING_NONE)
enabled = NULL;
}
@@ -1560,10 +1607,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+ intel_set_memory_cxsr(dev_priv, false);
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
@@ -1609,17 +1653,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
- if (HAS_FW_BLC(dev)) {
- if (enabled) {
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
- DRM_DEBUG_KMS("memory self refresh enabled\n");
- } else
- DRM_DEBUG_KMS("memory self refresh disabled\n");
- }
+ if (enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -2707,10 +2742,11 @@ static void ilk_update_wm(struct drm_crtc *crtc)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_update_sprite_wm(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enabled, bool scaled)
+static void
+ilk_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enabled, bool scaled)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2718,6 +2754,7 @@ static void ilk_update_sprite_wm(struct drm_plane *plane,
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
+ intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
/*
@@ -2852,13 +2889,16 @@ void intel_update_watermarks(struct drm_crtc *crtc)
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
+ uint32_t sprite_width,
+ uint32_t sprite_height,
+ int pixel_size,
bool enabled, bool scaled)
{
struct drm_i915_private *dev_priv = plane->dev->dev_private;
if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+ dev_priv->display.update_sprite_wm(plane, crtc,
+ sprite_width, sprite_height,
pixel_size, enabled, scaled);
}
@@ -3147,6 +3187,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_THRESHOLD;
+ mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+ mask &= dev_priv->pm_rps_events;
+
/* IVB and SNB hard hangs on looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*/
@@ -3250,7 +3293,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ else if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3348,6 +3393,15 @@ static void gen6_disable_rps(struct drm_device *dev)
gen6_disable_rps_interrupts(dev);
}
+static void cherryview_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ gen8_disable_rps_interrupts(dev);
+}
+
static void valleyview_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3365,10 +3419,10 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
else
mode = 0;
}
- DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
- (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
- (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+ DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+ (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+ (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
@@ -3392,8 +3446,8 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
mask = INTEL_RC6_ENABLE;
if ((enable_rc6 & mask) != enable_rc6)
- DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
- enable_rc6 & mask, enable_rc6, mask);
+ DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
+ enable_rc6 & mask, enable_rc6, mask);
return enable_rc6 & mask;
}
@@ -3419,7 +3473,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
- bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3430,7 +3484,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3483,15 +3537,23 @@ static void gen8_enable_rps(struct drm_device *dev)
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ if (IS_BROADWELL(dev))
+ I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+ else
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* 3: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
intel_print_rc6_info(dev, rc6_mask);
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_EI_MODE(1) |
- rc6_mask);
+ if (IS_BROADWELL(dev))
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ rc6_mask);
+ else
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
/* 4 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ,
@@ -3727,7 +3789,57 @@ void gen6_update_ring_freq(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp0;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+ return rp0;
+}
+
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpe;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+ rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+
+ return rpe;
+}
+
+static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp1;
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+ return rp1;
+}
+
+static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpn;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+ return rpn;
+}
+
+static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp1;
+
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+ return rp1;
+}
+
+static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
@@ -3752,7 +3864,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
return rpe;
}
-int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
@@ -3766,6 +3878,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
dev_priv->vlv_pctx->stolen->start);
}
+
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+ unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+ WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
+
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long pctx_paddr, paddr;
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ u32 pcbr;
+ int pctx_size = 32*1024;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ pcbr = I915_READ(VLV_PCBR);
+ if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ paddr = (dev_priv->mm.stolen_base +
+ (gtt->stolen_size - pctx_size));
+
+ pctx_paddr = (paddr & (~4095));
+ I915_WRITE(VLV_PCBR, pctx_paddr);
+ }
+}
+
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3840,6 +3981,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
+ dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ dev_priv->rps.rp1_freq);
+
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -3855,11 +4001,142 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
+static void cherryview_init_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ cherryview_setup_pctx(dev);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+ dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ dev_priv->rps.max_freq);
+
+ dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ dev_priv->rps.rp1_freq);
+
+ dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ dev_priv->rps.min_freq);
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
valleyview_cleanup_pctx(dev);
}
+static void cherryview_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+ int i;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ gtfifodbg = I915_READ(GTFIFODBG);
+ if (gtfifodbg) {
+ DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+ gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ cherryview_check_pctx(dev_priv);
+
+ /* 1a & 1b: Get forcewake during program sequence. Although the driver
+ * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 2a: Program RC6 thresholds.*/
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /* allows RC6 residency counter to work */
+ I915_WRITE(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ /* For now we assume BIOS is allocating and populating the PCBR */
+ pcbr = I915_READ(VLV_PCBR);
+
+ DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+
+ /* 3: Enable RC6 */
+ if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+ (pcbr >> VLV_PCBR_ADDR_SHIFT))
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+ I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+ /* 4 Program defaults and thresholds for RPS*/
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* WaDisablePwrmtrEvent:chv (pre-production hw) */
+ I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+ I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
+ /* 5: Enable RPS */
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+ DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq);
+
+ DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+ gen8_enable_rps_interrupts(dev);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3886,6 +4163,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
@@ -3906,9 +4184,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
+
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
@@ -4666,33 +4946,60 @@ void intel_init_gt_powersave(struct drm_device *dev)
{
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ cherryview_init_gt_powersave(dev);
+ else if (IS_VALLEYVIEW(dev))
valleyview_init_gt_powersave(dev);
}
void intel_cleanup_gt_powersave(struct drm_device *dev)
{
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ return;
+ else if (IS_VALLEYVIEW(dev))
valleyview_cleanup_gt_powersave(dev);
}
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here, we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Interrupts should be disabled already to avoid re-arming. */
+ WARN_ON(intel_irqs_enabled(dev_priv));
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ cancel_work_sync(&dev_priv->rps.work);
+
+ /* Force GPU to min freq during suspend */
+ gen6_rps_idle(dev_priv);
+}
+
void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(dev->irq_enabled);
+ WARN_ON(intel_irqs_enabled(dev_priv));
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
- } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
- if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
- intel_runtime_pm_put(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ intel_suspend_gt_powersave(dev);
- cancel_work_sync(&dev_priv->rps.work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ cherryview_disable_rps(dev);
+ else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
@@ -4710,7 +5017,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ cherryview_enable_rps(dev);
+ } else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
@@ -4735,7 +5044,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
- } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+ } else if (INTEL_INFO(dev)->gen >= 6) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -4918,11 +5227,9 @@ static void gen6_check_mch_setup(struct drm_device *dev)
uint32_t tmp;
tmp = I915_READ(MCH_SSKPD);
- if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
- DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
- DRM_INFO("This can cause pipe underruns and display issues.\n");
- DRM_INFO("Please upgrade your BIOS to fix this.\n");
- }
+ if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
+ DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
+ tmp);
}
static void gen6_init_clock_gating(struct drm_device *dev)
@@ -5108,7 +5415,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
I915_WRITE(_3D_CHICKEN3,
- _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
I915_WRITE(COMMON_SLICE_CHICKEN2,
_MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5343,10 +5650,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
- dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
- dev_priv->vlv_cdclk_freq);
-
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull:vlv */
@@ -5421,6 +5724,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ switch ((val >> 2) & 0x7) {
+ case 0:
+ case 1:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 2:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 3:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
+ dev_priv->mem_freq = 2000;
+ break;
+ case 4:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 5:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
+ dev_priv->mem_freq = 1600;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
@@ -5661,7 +5993,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- unsigned long irqflags;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -5677,21 +6008,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev)) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
- dev_priv->de_irq_mask[PIPE_B]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
- ~dev_priv->de_irq_mask[PIPE_B] |
- GEN8_PIPE_VBLANK);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
- dev_priv->de_irq_mask[PIPE_C]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
- ~dev_priv->de_irq_mask[PIPE_C] |
- GEN8_PIPE_VBLANK);
- POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ if (IS_BROADWELL(dev))
+ gen8_irq_power_well_post_enable(dev_priv);
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -5762,34 +6080,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
return true;
}
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id, bool enable)
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
{
- struct drm_device *dev = dev_priv->dev;
+ enum punit_power_well power_well_id = power_well->data;
u32 mask;
u32 state;
u32 ctrl;
- enum pipe pipe;
-
- if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- if (enable) {
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_INTEGRATED_CRI_CLK_VLV);
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- } else {
- for_each_pipe(pipe)
- assert_pll_disabled(dev_priv, pipe);
- /* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
- ~DPIO_CMNRST);
- }
- }
mask = PUNIT_PWRGT_MASK(power_well_id);
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5817,28 +6114,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
out:
mutex_unlock(&dev_priv->rps.hw_lock);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- enum punit_power_well power_well_id = power_well->data;
-
- __vlv_set_power_well(dev_priv, power_well_id, enable);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -5930,6 +6205,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, false);
}
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe pipe;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ for_each_pipe(pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -6079,6 +6401,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -6178,6 +6501,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = {
.is_enabled = vlv_power_well_enabled,
};
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_power_well_enable,
@@ -6238,10 +6568,25 @@ static struct i915_power_well vlv_power_wells[] = {
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &vlv_dpio_power_well_ops,
+ .ops = &vlv_dpio_cmn_power_well_ops,
},
};
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->data == power_well_id)
+ return power_well;
+ }
+
+ return NULL;
+}
+
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -6292,11 +6637,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+ /* nothing to do if common lane is already off */
+ if (!cmn->ops->is_enabled(dev_priv, cmn))
+ return;
+
+ /* If the display might already be active, skip this */
+ if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+ I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ disp2d->ops->enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply un-gating isn't enough to reset the PHY sufficiently to
+ * get the ports and lanes running.
+ */
+ cmn->ops->disable(dev_priv, cmn);
+}
+
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
+
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(dev_priv);
+ mutex_unlock(&power_domains->lock);
+ }
+
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_resume(dev_priv);
@@ -6469,7 +6853,7 @@ void intel_init_pm(struct drm_device *dev)
(dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
- pineview_disable_cxsr(dev);
+ intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
@@ -6552,7 +6936,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div;
@@ -6574,7 +6958,7 @@ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul;
@@ -6596,6 +6980,80 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
+static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int div, freq;
+
+ switch (dev_priv->rps.cz_freq) {
+ case 200:
+ div = 5;
+ break;
+ case 267:
+ div = 6;
+ break;
+ case 320:
+ case 333:
+ case 400:
+ div = 8;
+ break;
+ default:
+ return -1;
+ }
+
+ freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+
+ return freq;
+}
+
+static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+ int mul, opcode;
+
+ switch (dev_priv->rps.cz_freq) {
+ case 200:
+ mul = 5;
+ break;
+ case 267:
+ mul = 6;
+ break;
+ case 320:
+ case 333:
+ case 400:
+ mul = 8;
+ break;
+ default:
+ return -1;
+ }
+
+ opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
+
+ return opcode;
+}
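+
+/*
+ * Example (assuming cz_freq == 320, so div/mul == 8): an opcode of 0x14
+ * decodes to DIV_ROUND_CLOSEST(320 * 20, 2 * 8) / 2 == 200 MHz, and
+ * chv_freq_opcode(200) returns DIV_ROUND_CLOSEST(200 * 2 * 8, 320) * 2
+ * == 0x14, so the two conversions round-trip on the CZ clock grid.
+ */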
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int ret = -1;
+
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ ret = chv_gpu_freq(dev_priv, val);
+ else if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = byt_gpu_freq(dev_priv, val);
+
+ return ret;
+}
+
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+ int ret = -1;
+
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ ret = chv_freq_opcode(dev_priv, val);
+ else if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = byt_freq_opcode(dev_priv, val);
+
+ return ret;
+}
+
void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6606,5 +7064,5 @@ void intel_pm_setup(struct drm_device *dev)
intel_gen6_powersave_work);
dev_priv->pm.suspended = false;
- dev_priv->pm.irqs_disabled = false;
+ dev_priv->pm._irqs_disabled = false;
}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index a5e783a9928a..fd4f66231d30 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -28,7 +28,6 @@
struct intel_renderstate_rodata {
const u32 *reloc;
- const u32 reloc_items;
const u32 *batch;
const u32 batch_items;
};
@@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state;
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
.reloc = gen ## _g ## _null_state_relocs, \
- .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
.batch = gen ## _g ## _null_state_batch, \
.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
}
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
index 740538ad0977..56c1429d8a60 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = {
0x0000002c,
0x000001e0,
0x000001e4,
+ -1,
};
static const u32 gen6_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
index 6fa7ff2a1298..419e35a7b0ff 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = {
0x00000010,
0x00000018,
0x000001ec,
+ -1,
};
static const u32 gen7_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 5c875615d42a..75ef1b5de45c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = {
0x00000050,
0x00000060,
0x000003ec,
+ -1,
};
static const u32 gen8_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 279488addf3f..16371a444426 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size)
return space;
}
-static inline int ring_space(struct intel_engine_cs *ring)
+static inline int ring_space(struct intel_ringbuffer *ringbuf)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
}
@@ -381,6 +380,27 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
}
static int
+gen8_emit_pipe_control(struct intel_engine_cs *ring,
+ u32 flags, u32 scratch_addr)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(ring, flags);
+ intel_ring_emit(ring, scratch_addr);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+static int
gen8_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
{
@@ -403,22 +423,17 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
- }
-
- ret = intel_ring_begin(ring, 6);
- if (ret)
- return ret;
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
-
- return 0;
+ /* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
+ ret = gen8_emit_pipe_control(ring,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD,
+ 0);
+ if (ret)
+ return ret;
+ }
+ return gen8_emit_pipe_control(ring, flags, scratch_addr);
}
static void ring_write_tail(struct intel_engine_cs *ring,
@@ -517,6 +532,9 @@ static int init_ring_common(struct intel_engine_cs *ring)
else
ring_setup_phys_status_page(ring);
+ /* Enforce ordering by reading HEAD register back */
+ I915_READ_HEAD(ring);
+
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
@@ -545,7 +563,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
else {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
ringbuf->last_retired_head = -1;
}
@@ -604,6 +622,8 @@ static int init_render_ring(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
+ if (ret)
+ return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
@@ -658,6 +678,13 @@ static int init_render_ring(struct intel_engine_cs *ring)
static void render_ring_cleanup(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->semaphore_obj) {
+ i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
+ drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
+ dev_priv->semaphore_obj = NULL;
+ }
if (ring->scratch.obj == NULL)
return;
@@ -671,29 +698,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
ring->scratch.obj = NULL;
}
+static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 8
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *waiter;
+ int i, ret, num_rings;
+
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+
+ for_each_ring(waiter, dev_priv, i) {
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+ continue;
+
+ intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+ intel_ring_emit(signaller, lower_32_bits(gtt_offset));
+ intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, 0);
+ intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->id));
+ intel_ring_emit(signaller, 0);
+ }
+
+ return 0;
+}
+
+static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 6
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *waiter;
+ int i, ret, num_rings;
+
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+
+ for_each_ring(waiter, dev_priv, i) {
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+ continue;
+
+ intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
+ MI_FLUSH_DW_OP_STOREDW);
+ intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
+ MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->id));
+ intel_ring_emit(signaller, 0);
+ }
+
+ return 0;
+}
+
static int gen6_signal(struct intel_engine_cs *signaller,
unsigned int num_dwords)
{
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
- int i, ret;
+ int i, ret, num_rings;
- /* NB: In order to be able to do semaphore MBOX updates for varying
- * number of rings, it's easiest if we round up each individual update
- * to a multiple of 2 (since ring updates must always be a multiple of
- * 2) even though the actual update only requires 3 dwords.
- */
-#define MBOX_UPDATE_DWORDS 4
- if (i915_semaphore_is_enabled(dev))
- num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
- else
- return intel_ring_begin(signaller, num_dwords);
+#define MBOX_UPDATE_DWORDS 3
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
+#undef MBOX_UPDATE_DWORDS
ret = intel_ring_begin(signaller, num_dwords);
if (ret)
return ret;
-#undef MBOX_UPDATE_DWORDS
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
@@ -701,15 +795,13 @@ static int gen6_signal(struct intel_engine_cs *signaller,
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
- intel_ring_emit(signaller, MI_NOOP);
- } else {
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
}
}
+ /* If num_dwords was rounded, make sure the tail pointer is correct */
+ if (num_rings % 2 == 0)
+ intel_ring_emit(signaller, MI_NOOP);
+
return 0;
}
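
/*
 * Worked example of the dword accounting above: with four rings enabled,
 * the three other rings take MBOX_UPDATE_DWORDS == 3 dwords each, the
 * 9-dword total is rounded up to 10, and the trailing MI_NOOP keeps the
 * emitted count even (ring updates must always be a multiple of 2 dwords);
 * with five rings the 12-dword total is already even and no padding is
 * needed.
 */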
@@ -727,7 +819,11 @@ gen6_add_request(struct intel_engine_cs *ring)
{
int ret;
- ret = ring->semaphore.signal(ring, 4);
+ if (ring->semaphore.signal)
+ ret = ring->semaphore.signal(ring, 4);
+ else
+ ret = intel_ring_begin(ring, 4);
+
if (ret)
return ret;
@@ -754,6 +850,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
* @signaller - ring which has, or will signal
* @seqno - seqno which the waiter will block on
*/
+
+static int
+gen8_ring_sync(struct intel_engine_cs *waiter,
+ struct intel_engine_cs *signaller,
+ u32 seqno)
+{
+ struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+ int ret;
+
+ ret = intel_ring_begin(waiter, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter,
+ lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_emit(waiter,
+ upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_advance(waiter);
+ return 0;
+}
+
static int
gen6_ring_sync(struct intel_engine_cs *waiter,
struct intel_engine_cs *signaller,
@@ -901,7 +1023,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0)
- ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
@@ -916,7 +1038,7 @@ gen5_ring_put_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0)
- ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1109,7 +1231,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
GT_PARITY_ERROR(dev)));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1129,7 +1251,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring)
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
else
I915_WRITE_IMR(ring, ~0);
- ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1147,7 +1269,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+ gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1167,7 +1289,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
- snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+ gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1329,6 +1451,7 @@ static int init_status_page(struct intel_engine_cs *ring)
struct drm_i915_gem_object *obj;
if ((obj = ring->status_page.obj) == NULL) {
+ unsigned flags;
int ret;
obj = i915_gem_alloc_object(ring->dev, 4096);
@@ -1341,7 +1464,20 @@ static int init_status_page(struct intel_engine_cs *ring)
if (ret)
goto err_unref;
- ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+ flags = 0;
+ if (!HAS_LLC(ring->dev))
+ /* On g33, we cannot place HWS above 256MiB, so
+ * restrict its pinning to the low mappable arena.
+ * Though this restriction is not documented for
+ * gen4, gen5, or byt, they also behave similarly
+ * and hang if the HWS is placed at the top of the
+ * GTT. To generalise, it appears that all !llc
+ * platforms have issues with us placing the HWS
+ * above the mappable region (even though we never
+ * actually map it).
+ */
+ flags |= PIN_MAPPABLE;
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
if (ret) {
err_unref:
drm_gem_object_unreference(&obj->base);
@@ -1378,15 +1514,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
-static int allocate_ring_buffer(struct intel_engine_cs *ring)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
+ if (!ringbuf->obj)
+ return;
+
+ iounmap(ringbuf->virtual_start);
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
+ drm_gem_object_unreference(&ringbuf->obj->base);
+ ringbuf->obj = NULL;
+}
+
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_object *obj;
int ret;
- if (intel_ring_initialized(ring))
+ if (ringbuf->obj)
return 0;
obj = NULL;
@@ -1397,6 +1543,9 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
if (obj == NULL)
return -ENOMEM;
+ /* mark ring buffers as read-only from GPU side by default */
+ obj->gt_ro = 1;
+
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
goto err_unref;
@@ -1455,7 +1604,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = allocate_ring_buffer(ring);
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
goto error;
@@ -1496,11 +1645,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
- iounmap(ringbuf->virtual_start);
-
- i915_gem_object_ggtt_unpin(ringbuf->obj);
- drm_gem_object_unreference(&ringbuf->obj->base);
- ringbuf->obj = NULL;
+ intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@@ -1526,7 +1671,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n)
return 0;
}
@@ -1549,7 +1694,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
return 0;
}
@@ -1578,7 +1723,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
break;
@@ -1630,7 +1775,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
return 0;
}
@@ -1746,14 +1891,15 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
BUG_ON(ring->outstanding_lazy_seqno);
- if (INTEL_INFO(ring->dev)->gen >= 6) {
+ if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
- if (HAS_VEBOX(ring->dev))
+ if (HAS_VEBOX(dev))
I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
@@ -1941,45 +2087,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_object *obj;
+ int ret;
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_INFO(dev)->gen >= 8) {
+ if (i915_semaphore_is_enabled(dev)) {
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else {
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else
+ dev_priv->semaphore_obj = obj;
+ }
+ }
+ ring->add_request = gen6_add_request;
+ ring->flush = gen8_render_ring_flush;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (i915_semaphore_is_enabled(dev)) {
+ WARN_ON(!dev_priv->semaphore_obj);
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_rcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
+ } else if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush;
- if (INTEL_INFO(dev)->gen >= 8) {
- ring->flush = gen8_render_ring_flush;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- } else {
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- }
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform.
- * And there is no VCS2 ring on the pre-gen8 platform. So the
- * semaphore between RCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between VCS2 and RCS later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ /*
+ * The mbox-based semaphores are only used on pre-gen8
+ * platforms, and there is no VCS2 ring on those, so the
+ * semaphore between RCS and VCS2 is initialized as
+ * INVALID. Gen8 will initialize the sema between VCS2
+ * and RCS later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
@@ -2007,6 +2182,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
+
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
@@ -2024,9 +2200,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
- struct drm_i915_gem_object *obj;
- int ret;
-
obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
if (obj == NULL) {
DRM_ERROR("Failed to allocate batch bo\n");
@@ -2157,31 +2330,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform.
- * And there is no VCS2 ring on the pre-gen8 platform. So the
- * semaphore between VCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between VCS2 and VCS later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
} else {
ring->mmio_base = BSD_RING_BASE;
ring->flush = bsd_ring_flush;
@@ -2218,7 +2392,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
return -EINVAL;
}
- ring->name = "bds2_ring";
+ ring->name = "bsd2 ring";
ring->id = VCS2;
ring->write_tail = ring_write_tail;
@@ -2233,25 +2407,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on the pre-gen8. And there
- * is no bsd2 ring on the pre-gen8. So now the semaphore_register
- * between VCS2 and other ring is initialized as invalid.
- * Gen8 will initialize the sema between VCS2 and other ring later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
@@ -2277,30 +2437,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ /*
+ * The mbox-based semaphores are only used on pre-gen8
+ * platforms, and there is no VCS2 ring on those, so the
+ * semaphore between BCS and VCS2 is initialized as
+ * INVALID. Gen8 will initialize the sema between BCS
+ * and VCS2 later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform. And
- * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
- * between BCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between BCS and VCS2 later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
@@ -2327,24 +2495,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e72017bdcd7f..70525d0c2c74 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -40,10 +40,37 @@ struct intel_hw_status_page {
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
+ * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
+ */
+#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SIGNAL_OFFSET(__ring, to) \
+ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+ (i915_semaphore_seqno_size * (to)))
+
+#define GEN8_WAIT_OFFSET(__ring, from) \
+ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+ (i915_semaphore_seqno_size * (__ring)->id))
+
+#define GEN8_RING_SEMAPHORE_INIT do { \
+ if (!dev_priv->semaphore_obj) { \
+ break; \
+ } \
+ ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
+ ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
+ ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
+ ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
+ ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
+ ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+ } while(0)
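+
+/*
+ * Example with the five rings above and ring ids RCS == 0, VCS == 1:
+ * the VCS ring signals RCS at GEN8_SIGNAL_OFFSET(vcs, RCS) ==
+ * semaphore_obj base + 1 * 5 * 8 + 8 * 0 == base + 0x28, which is the
+ * same slot RCS polls via GEN8_WAIT_OFFSET(rcs, VCS); the two macros
+ * are transposes of each other, matching the table in intel_engine_cs
+ * below.
+ */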
+
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
HANGCHECK_ACTIVE,
+ HANGCHECK_ACTIVE_LOOP,
HANGCHECK_KICK,
HANGCHECK_HUNG,
};
@@ -52,6 +79,7 @@ enum intel_ring_hangcheck_action {
struct intel_ring_hangcheck {
u64 acthd;
+ u64 max_acthd;
u32 seqno;
int score;
enum intel_ring_hangcheck_action action;
@@ -127,15 +155,55 @@ struct intel_engine_cs {
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring);
+ /* GEN8 signal/wait table - never trust comments!
+ * signal to signal to signal to signal to signal to
+ * RCS VCS BCS VECS VCS2
+ * --------------------------------------------------------------------
+ * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
+ * |-------------------------------------------------------------------
+ * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
+ * |-------------------------------------------------------------------
+ * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
+ * |-------------------------------------------------------------------
+ * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
+ * |-------------------------------------------------------------------
+ * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
+ * |-------------------------------------------------------------------
+ *
+ * Generalization:
+ * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
+ * ie. transpose of g(x, y)
+ *
+ * sync from sync from sync from sync from sync from
+ * RCS VCS BCS VECS VCS2
+ * --------------------------------------------------------------------
+ * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
+ * |-------------------------------------------------------------------
+ * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
+ * |-------------------------------------------------------------------
+ * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
+ * |-------------------------------------------------------------------
+ * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
+ * |-------------------------------------------------------------------
+ * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
+ * |-------------------------------------------------------------------
+ *
+ * Generalization:
+ * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
+ * ie. transpose of f(x, y)
+ */
struct {
u32 sync_seqno[I915_NUM_RINGS-1];
- struct {
- /* our mbox written by others */
- u32 wait[I915_NUM_RINGS];
- /* mboxes this ring signals to */
- u32 signal[I915_NUM_RINGS];
- } mbox;
+ union {
+ struct {
+ /* our mbox written by others */
+ u32 wait[I915_NUM_RINGS];
+ /* mboxes this ring signals to */
+ u32 signal[I915_NUM_RINGS];
+ } mbox;
+ u64 signal_ggtt[I915_NUM_RINGS];
+ };
/* AKA wait() */
int (*sync_to)(struct intel_engine_cs *ring,
@@ -238,9 +306,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
int idx;
/*
- * cs -> 0 = vcs, 1 = bcs
- * vcs -> 0 = bcs, 1 = cs,
- * bcs -> 0 = cs, 1 = vcs.
+ * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+ * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+ * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+ * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+ * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
idx = (other - ring) - 1;
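
/*
 * e.g. on the rcs ring (id 0), other == &dev_priv->ring[VECS] gives
 * idx == (3 - 0) - 1 == 2, matching the "2 = vecs" entry in the rcs
 * row above.
 */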
@@ -318,9 +388,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
-static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
+static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
- return ring->buffer->tail;
+ return ringbuf->tail;
}
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 20375cc7f82d..9350edd6728d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2433,7 +2433,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
connector->base.unregister = intel_sdvo_connector_unregister;
intel_connector_attach_encoder(&connector->base, &encoder->base);
- ret = drm_sysfs_connector_add(drm_connector);
+ ret = drm_connector_register(drm_connector);
if (ret < 0)
goto err1;
@@ -2446,7 +2446,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
return 0;
err2:
- drm_sysfs_connector_remove(drm_connector);
+ drm_connector_unregister(drm_connector);
err1:
drm_connector_cleanup(drm_connector);
@@ -2559,7 +2559,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2638,7 +2638,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
return true;
err:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2711,7 +2711,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (intel_attached_encoder(connector) == &intel_sdvo->base) {
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
}
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9a17b4e92ef4..168c6652cda1 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -218,7 +218,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
sprctl |= SP_ENABLE;
- intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
+ pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -283,7 +284,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
- intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
static int
@@ -406,7 +407,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
- intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
+ true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -486,7 +488,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
static int
@@ -606,7 +608,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
- intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
+ pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -681,7 +684,7 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
static void
@@ -819,6 +822,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_i915_gem_object *old_obj = intel_plane->obj;
@@ -1006,6 +1010,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
*/
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret)
@@ -1039,6 +1045,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
else
intel_plane->disable_plane(plane, crtc);
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
+
if (!primary_was_enabled && primary_enabled)
intel_post_enable_primary(crtc);
}
@@ -1068,6 +1076,7 @@ intel_disable_plane(struct drm_plane *plane)
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
+ enum pipe pipe;
if (!plane->fb)
return 0;
@@ -1076,6 +1085,7 @@ intel_disable_plane(struct drm_plane *plane)
return -EINVAL;
intel_crtc = to_intel_crtc(plane->crtc);
+ pipe = intel_crtc->pipe;
if (intel_crtc->active) {
bool primary_was_enabled = intel_crtc->primary_enabled;
@@ -1094,6 +1104,8 @@ intel_disable_plane(struct drm_plane *plane)
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_plane->obj);
+ i915_gem_track_fb(intel_plane->obj, NULL,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
intel_plane->obj = NULL;
@@ -1114,7 +1126,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
@@ -1128,13 +1139,12 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, set->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
- plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
ret = intel_plane->update_colorkey(plane, set);
@@ -1147,7 +1157,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
@@ -1157,13 +1166,12 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, get->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
- plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
intel_plane->get_colorkey(plane, get);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 67c6c9a2eb1c..e211eef4b7e4 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1680,5 +1680,5 @@ intel_tv_init(struct drm_device *dev)
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f6fef7ac069..e81bc3bdc533 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,8 +231,8 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
}
/* WaRsForcewakeWaitTC0:vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -250,9 +250,10 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ gen6_gt_check_fifodbg(dev_priv);
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
@@ -315,7 +316,7 @@ static void gen6_force_wake_timer(unsigned long arg)
intel_runtime_pm_put(dev_priv);
}
-static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -357,16 +358,12 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
- } else {
- dev_priv->uncore.forcewake_count = 0;
- dev_priv->uncore.fw_rendercount = 0;
- dev_priv->uncore.fw_mediacount = 0;
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void intel_uncore_early_sanitize(struct drm_device *dev)
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -389,7 +386,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_forcewake_reset(dev, restore_forcewake);
}
void intel_uncore_sanitize(struct drm_device *dev)
@@ -469,16 +466,43 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((reg) < 0x40000 && (reg) != FORCEWAKE)
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
- (((reg) >= 0x2000 && (reg) < 0x4000) ||\
- ((reg) >= 0x5000 && (reg) < 0x8000) ||\
- ((reg) >= 0xB000 && (reg) < 0x12000) ||\
- ((reg) >= 0x2E000 && (reg) < 0x30000))
+#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
- (((reg) >= 0x12000 && (reg) < 0x14000) ||\
- ((reg) >= 0x22000 && (reg) < 0x24000) ||\
- ((reg) >= 0x30000 && (reg) < 0x40000))
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x4000) || \
+ REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0xB000, 0x12000) || \
+ REG_RANGE((reg), 0x2E000, 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x22000, 0x24000) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x4000) || \
+ REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0x8300, 0x8500) || \
+ REG_RANGE((reg), 0xB000, 0xC000) || \
+ REG_RANGE((reg), 0xE000, 0xE800))
+
+#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x8800, 0x8900) || \
+ REG_RANGE((reg), 0xD000, 0xD800) || \
+ REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x1A000, 0x1C000) || \
+ REG_RANGE((reg), 0x1E800, 0x1EA00) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x4000, 0x5000) || \
+ REG_RANGE((reg), 0x8000, 0x8300) || \
+ REG_RANGE((reg), 0x8500, 0x8600) || \
+ REG_RANGE((reg), 0x9000, 0xB000) || \
+ REG_RANGE((reg), 0xC000, 0xC800) || \
+ REG_RANGE((reg), 0xF000, 0x10000) || \
+ REG_RANGE((reg), 0x14000, 0x14400) || \
+ REG_RANGE((reg), 0x22000, 0x24000))
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
@@ -490,20 +514,30 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
}
static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
+ bool before)
{
+ const char *op = read ? "reading" : "writing to";
+ const char *when = before ? "before" : "after";
+
+ if (!i915.mmio_debug)
+ return;
+
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unknown unclaimed register before writing to %x\n",
- reg);
+ WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
+ when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
+ if (i915.mmio_debug)
+ return;
+
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unclaimed write to %x\n", reg);
+ DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
@@ -540,6 +574,7 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (dev_priv->uncore.forcewake_count == 0 && \
NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -550,6 +585,7 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
+ hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
REG_READ_FOOTER; \
}
@@ -573,7 +609,35 @@ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_FOOTER; \
}
+#define __chv_read(x) \
+static u##x \
+chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ unsigned fwengine = 0; \
+ REG_READ_HEADER(x); \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ REG_READ_FOOTER; \
+}
+__chv_read(8)
+__chv_read(16)
+__chv_read(32)
+__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
@@ -591,6 +655,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
+#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
@@ -647,12 +712,13 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- hsw_unclaimed_reg_clear(dev_priv, reg); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- hsw_unclaimed_reg_check(dev_priv, reg); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+ hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
@@ -681,6 +747,7 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -692,9 +759,43 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
} else { \
__raw_i915_write##x(dev_priv, reg, val); \
} \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+ hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
+#define __chv_write(x) \
+static void \
+chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ unsigned fwengine = 0; \
+ bool shadowed = is_gen8_shadowed(dev_priv, reg); \
+ REG_WRITE_HEADER; \
+ if (!shadowed) { \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ REG_WRITE_FOOTER; \
+}
+
+__chv_write(8)
+__chv_write(16)
+__chv_write(32)
+__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
@@ -716,6 +817,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)
+#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
@@ -731,7 +833,7 @@ void intel_uncore_init(struct drm_device *dev)
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
- intel_uncore_early_sanitize(dev);
+ intel_uncore_early_sanitize(dev, false);
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
@@ -779,14 +881,26 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
- dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
- dev_priv->uncore.funcs.mmio_writew = gen8_write16;
- dev_priv->uncore.funcs.mmio_writel = gen8_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ if (IS_CHERRYVIEW(dev)) {
+ dev_priv->uncore.funcs.mmio_writeb = chv_write8;
+ dev_priv->uncore.funcs.mmio_writew = chv_write16;
+ dev_priv->uncore.funcs.mmio_writel = chv_write32;
+ dev_priv->uncore.funcs.mmio_writeq = chv_write64;
+ dev_priv->uncore.funcs.mmio_readb = chv_read8;
+ dev_priv->uncore.funcs.mmio_readw = chv_read16;
+ dev_priv->uncore.funcs.mmio_readl = chv_read32;
+ dev_priv->uncore.funcs.mmio_readq = chv_read64;
+
+ } else {
+ dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen8_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen8_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ }
break;
case 7:
case 6:
@@ -912,7 +1026,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
if (args->flags || args->pad)
return -EINVAL;
- if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1053,18 +1167,16 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
- switch (INTEL_INFO(dev)->gen) {
- case 8:
- case 7:
- case 6: return gen6_do_reset(dev);
- case 5: return ironlake_do_reset(dev);
- case 4:
- if (IS_G4X(dev))
- return g4x_do_reset(dev);
- else
- return i965_do_reset(dev);
- default: return -ENODEV;
- }
+ if (INTEL_INFO(dev)->gen >= 6)
+ return gen6_do_reset(dev);
+ else if (IS_GEN5(dev))
+ return ironlake_do_reset(dev);
+ else if (IS_G4X(dev))
+ return g4x_do_reset(dev);
+ else if (IS_GEN4(dev))
+ return i965_do_reset(dev);
+ else
+ return -ENODEV;
}
void intel_uncore_check_errors(struct drm_device *dev)
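
For readers following the Cherryview forcewake changes above, here is a minimal standalone sketch (not part of the patch; the main() harness and example offsets are hypothetical) showing how the new REG_RANGE() helper and the FORCEWAKE_CHV_*_RANGE_OFFSET() macros quoted from the hunk classify an MMIO offset into a forcewake domain, in the same render/media/common order used by the chv_read##x()/chv_write##x() macros:

#include <stdint.h>
#include <stdio.h>

/* Helper introduced by the patch: half-open [start, end) check. */
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

/* Ranges quoted from the intel_uncore.c hunk above (common list is a subset
 * kept short for illustration). */
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xC000) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x9000, 0xB000))

int main(void)
{
	const uint32_t regs[] = { 0x2100, 0x4800, 0x12340, 0x8400, 0x50000 };
	unsigned int i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		uint32_t reg = regs[i];
		const char *domain;

		/* Same check order as the chv_read##x()/chv_write##x() macros. */
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg))
			domain = "render";
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg))
			domain = "media";
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg))
			domain = "render+media";
		else
			domain = "none";

		printf("0x%05x -> %s forcewake\n", (unsigned)reg, domain);
	}
	return 0;
}

In the driver itself the selected domain then feeds force_wake_get()/force_wake_put() around the raw MMIO access; the sketch only demonstrates the range classification.
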
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index cf11ee68a6d9..80de23d9b9c9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -280,7 +280,7 @@ static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 13b7dd83faa9..5451dc58eff1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -272,7 +272,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
return 0;
}
-static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
.gamma_set = mga_crtc_fb_gamma_set,
.gamma_get = mga_crtc_fb_gamma_get,
.fb_probe = mgag200fb_create,
@@ -293,9 +293,10 @@ int mgag200_fbdev_init(struct mga_device *mdev)
return -ENOMEM;
mdev->mfbdev = mfbdev;
- mfbdev->helper.funcs = &mga_fb_helper_funcs;
spin_lock_init(&mfbdev->dirty_lock);
+ drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
+
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT);
if (ret)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a034ed408252..45f04dea0ac2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1562,19 +1562,9 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj =
- drm_mode_object_find(connector->dev, enc_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
@@ -1621,7 +1611,7 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
mga_connector->i2c = mgag200_i2c_create(dev);
if (!mga_connector->i2c)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f12388967856..c99c50de3226 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,6 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on MSM_IOMMU
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
select DRM_KMS_HELPER
select SHMEM
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 85d615e7d62f..a8a144b38eaa 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -203,6 +203,15 @@ enum a2xx_rb_copy_sample_select {
SAMPLE_0123 = 6,
};
+enum a2xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_MIN_DST_SRC = 2,
+ BLEND_MAX_DST_SRC = 3,
+ BLEND_DST_MINUS_SRC = 4,
+ BLEND_DST_PLUS_SRC_BIAS = 5,
+};
+
enum adreno_mmu_clnt_beh {
BEH_NEVR = 0,
BEH_TRAN_RNG = 1,
@@ -890,6 +899,39 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+}
#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
@@ -963,7 +1005,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend
}
#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
-static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val)
{
return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
}
@@ -981,7 +1023,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend
}
#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
-static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val)
{
return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
}
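
As a side note on the a2xx header changes above: the generic adreno_rb_blend_opcode argument is replaced by the new per-generation enum a2xx_rb_blend_opcode. A small standalone sketch (not from the patch; the main() harness and chosen opcodes are just example values) shows how the enum and the COMB_FCN field helpers quoted from the hunk combine into an RB_BLEND_CONTROL word:

#include <stdint.h>
#include <stdio.h>

/* Enum and field helpers quoted from the a2xx.xml.h hunks above. */
enum a2xx_rb_blend_opcode {
	BLEND_DST_PLUS_SRC = 0,
	BLEND_SRC_MINUS_DST = 1,
	BLEND_MIN_DST_SRC = 2,
	BLEND_MAX_DST_SRC = 3,
	BLEND_DST_MINUS_SRC = 4,
	BLEND_DST_PLUS_SRC_BIAS = 5,
};

#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK	0x000000e0
#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT	5
static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val)
{
	return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
}

#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK	0x00e00000
#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT	21
static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val)
{
	return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
}

int main(void)
{
	/* Pack "dst + src" for color and "max(dst, src)" for alpha. */
	uint32_t blend_control =
		A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(BLEND_DST_PLUS_SRC) |
		A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(BLEND_MAX_DST_SRC);

	printf("RB_BLEND_CONTROL = 0x%08x\n", (unsigned)blend_control);
	return 0;
}
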
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index a7be56163d23..303e8a9e91a5 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -41,31 +41,11 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum a3xx_render_mode {
- RB_RENDERING_PASS = 0,
- RB_TILING_PASS = 1,
- RB_RESOLVE_PASS = 2,
-};
-
enum a3xx_tile_mode {
LINEAR = 0,
TILE_32X32 = 2,
};
-enum a3xx_threadmode {
- MULTI = 0,
- SINGLE = 1,
-};
-
-enum a3xx_instrbuffermode {
- BUFFER = 1,
-};
-
-enum a3xx_threadsize {
- TWO_QUADS = 0,
- FOUR_QUADS = 1,
-};
-
enum a3xx_state_block_id {
HLSQ_BLOCK_ID_TP_TEX = 2,
HLSQ_BLOCK_ID_TP_MIPMAP = 3,
@@ -169,6 +149,8 @@ enum a3xx_color_fmt {
RB_R8G8B8A8_UNORM = 8,
RB_Z16_UNORM = 12,
RB_A8_UNORM = 20,
+ RB_R16G16B16A16_FLOAT = 27,
+ RB_R32G32B32A32_FLOAT = 51,
};
enum a3xx_color_swap {
@@ -178,12 +160,6 @@ enum a3xx_color_swap {
XYZW = 3,
};
-enum a3xx_msaa_samples {
- MSAA_ONE = 0,
- MSAA_TWO = 1,
- MSAA_FOUR = 2,
-};
-
enum a3xx_sp_perfcounter_select {
SP_FS_CFLOW_INSTRUCTIONS = 12,
SP_FS_FULL_ALU_INSTRUCTIONS = 14,
@@ -191,21 +167,45 @@ enum a3xx_sp_perfcounter_select {
SP_ALU_ACTIVE_CYCLES = 29,
};
-enum adreno_rb_copy_control_mode {
- RB_COPY_RESOLVE = 1,
- RB_COPY_DEPTH_STENCIL = 5,
+enum a3xx_rop_code {
+ ROP_CLEAR = 0,
+ ROP_NOR = 1,
+ ROP_AND_INVERTED = 2,
+ ROP_COPY_INVERTED = 3,
+ ROP_AND_REVERSE = 4,
+ ROP_INVERT = 5,
+ ROP_XOR = 6,
+ ROP_NAND = 7,
+ ROP_AND = 8,
+ ROP_EQUIV = 9,
+ ROP_NOOP = 10,
+ ROP_OR_INVERTED = 11,
+ ROP_COPY = 12,
+ ROP_OR_REVERSE = 13,
+ ROP_OR = 14,
+ ROP_SET = 15,
+};
+
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
};
enum a3xx_tex_filter {
A3XX_TEX_NEAREST = 0,
A3XX_TEX_LINEAR = 1,
+ A3XX_TEX_ANISO = 2,
};
enum a3xx_tex_clamp {
A3XX_TEX_REPEAT = 0,
A3XX_TEX_CLAMP_TO_EDGE = 1,
A3XX_TEX_MIRROR_REPEAT = 2,
- A3XX_TEX_CLAMP_NONE = 3,
+ A3XX_TEX_CLAMP_TO_BORDER = 3,
+ A3XX_TEX_MIRROR_CLAMP = 4,
};
enum a3xx_tex_swiz {
@@ -316,6 +316,7 @@ enum a3xx_tex_type {
#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
+#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001
#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
@@ -549,6 +550,10 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
+
+#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
+
#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
@@ -556,6 +561,9 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000
+#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000
#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
@@ -620,8 +628,26 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
}
#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
+#define A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+}
#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
@@ -743,6 +769,7 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode va
#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
+#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008
#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
@@ -751,6 +778,10 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
}
#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
+#define A3XX_RB_RENDER_CONTROL_XCOORD 0x00004000
+#define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000
+#define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000
+#define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
@@ -796,7 +827,7 @@ static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4
#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
-static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
+static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
{
return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
}
@@ -856,7 +887,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -874,7 +905,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -957,17 +988,24 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples
{
return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008
#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
-#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
-#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
{
- return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+ return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
}
#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
@@ -1005,6 +1043,12 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
{
return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
}
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
@@ -1019,6 +1063,7 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endi
}
#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
+#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
@@ -1044,7 +1089,7 @@ static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
@@ -1172,6 +1217,8 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
}
#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
@@ -1179,7 +1226,23 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
+#define REG_A3XX_VGT_BIN_BASE 0x000021e1
+
+#define REG_A3XX_VGT_BIN_SIZE 0x000021e2
+
#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK;
+}
#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
@@ -1203,6 +1266,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
}
#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
@@ -1232,6 +1296,7 @@ static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize
}
#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
@@ -1242,6 +1307,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
}
#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
+#define A3XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
+#define A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
@@ -1312,10 +1383,36 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
}
#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
+}
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; }
-#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
-#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; }
#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
@@ -1323,7 +1420,9 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
-#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
@@ -1438,6 +1537,12 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
{
return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
}
+#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
@@ -1462,12 +1567,13 @@ static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val
}
#define REG_A3XX_VPC_ATTR 0x00002280
-#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
+#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
{
return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
}
+#define A3XX_VPC_ATTR_PSIZE 0x00000200
#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
@@ -1522,11 +1628,11 @@ static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
{
return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
}
-#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
-#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
-static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val)
+#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000
+#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22
+static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val)
{
- return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
+ return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK;
}
#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
@@ -1569,6 +1675,7 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
}
#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -1742,6 +1849,7 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
}
#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
@@ -1802,6 +1910,13 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
@@ -1914,6 +2029,42 @@ static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
+#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070
+#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071
+#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072
+
+#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073
+
+#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074
+
+#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075
+
+#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c
+
#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -2080,6 +2231,8 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
}
#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
+#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6
+
#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
@@ -2117,6 +2270,39 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+}
#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
@@ -2152,6 +2338,12 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
{
return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
}
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20
+static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
+}
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
@@ -2170,6 +2362,7 @@ static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
#define REG_A3XX_TEX_CONST_0 0x00000000
#define A3XX_TEX_CONST_0_TILED 0x00000001
+#define A3XX_TEX_CONST_0_SRGB 0x00000004
#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
@@ -2206,6 +2399,7 @@ static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
{
return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
}
+#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000
#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
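
The a3xx header hunks above also add fixed-point float fields such as GRAS_SU_POINT_MINMAX; the (val * 8.0) conversion means sizes are stored in 1/8-unit increments. A standalone sketch (not from the patch; the main() harness and the 0.5/64.0 example sizes are illustrative) using the helpers quoted from the hunk:

#include <stdint.h>
#include <stdio.h>

/* Point min/max helpers quoted from the a3xx.xml.h hunk above. */
#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK	0x0000ffff
#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT	0
static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
{
	return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
}

#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK	0xffff0000
#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT	16
static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
{
	return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
}

int main(void)
{
	/* Allow point sizes from 0.5 to 64.0 (encoded as 4 and 512 in 1/8 units). */
	uint32_t minmax = A3XX_GRAS_SU_POINT_MINMAX_MIN(0.5f) |
			  A3XX_GRAS_SU_POINT_MINMAX_MAX(64.0f);

	printf("GRAS_SU_POINT_MINMAX = 0x%08x\n", (unsigned)minmax);
	return 0;
}
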
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 942e09d898a8..2773600c9488 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -392,13 +392,10 @@ static const unsigned int a3xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- struct drm_device *dev = gpu->dev;
int i;
adreno_show(gpu, m);
- mutex_lock(&dev->struct_mutex);
-
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
@@ -418,8 +415,6 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
}
gpu->funcs->pm_suspend(gpu);
-
- mutex_unlock(&dev->struct_mutex);
}
#endif
@@ -685,6 +680,8 @@ static int a3xx_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,adreno-3xx" },
+ /* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
{}
};
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index bb9a8ca0507b..85ff66cbddd6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -19,6 +19,11 @@
#define __A3XX_GPU_H__
#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
#include "a3xx.xml.h"
struct a3xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index d6e6ce2d1abd..9de19ac2e86c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -87,15 +87,6 @@ enum adreno_rb_blend_factor {
FACTOR_SRC_ALPHA_SATURATE = 16,
};
-enum adreno_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_MIN_DST_SRC = 2,
- BLEND_MAX_DST_SRC = 3,
- BLEND_DST_MINUS_SRC = 4,
- BLEND_DST_PLUS_SRC_BIAS = 5,
-};
-
enum adreno_rb_surface_endian {
ENDIAN_NONE = 0,
ENDIAN_8IN16 = 1,
@@ -116,6 +107,39 @@ enum adreno_rb_depth_format {
DEPTHX_24_8 = 1,
};
+enum adreno_rb_copy_control_mode {
+ RB_COPY_RESOLVE = 1,
+ RB_COPY_CLEAR = 2,
+ RB_COPY_DEPTH_STENCIL = 5,
+};
+
+enum a3xx_render_mode {
+ RB_RENDERING_PASS = 0,
+ RB_TILING_PASS = 1,
+ RB_RESOLVE_PASS = 2,
+ RB_COMPUTE_PASS = 3,
+};
+
+enum a3xx_msaa_samples {
+ MSAA_ONE = 0,
+ MSAA_TWO = 1,
+ MSAA_FOUR = 2,
+};
+
+enum a3xx_threadmode {
+ MULTI = 0,
+ SINGLE = 1,
+};
+
+enum a3xx_instrbuffermode {
+ BUFFER = 1,
+};
+
+enum a3xx_threadsize {
+ TWO_QUADS = 0,
+ FOUR_QUADS = 1,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
@@ -264,6 +288,8 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
#define REG_AXXX_CP_INT_ACK 0x000001f4
#define REG_AXXX_CP_ME_CNTL 0x000001f6
+#define AXXX_CP_ME_CNTL_BUSY 0x20000000
+#define AXXX_CP_ME_CNTL_HALT 0x10000000
#define REG_AXXX_CP_ME_STATUS 0x000001f7
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 28ca8cd8b09e..655ce5b14ad0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -91,9 +91,17 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
DBG("%s", gpu->name);
+ ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ if (ret) {
+ gpu->rb_iova = 0;
+ dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+ return ret;
+ }
+
/* Setup REG_CP_RB_CNTL: */
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
/* size is log2(quad-words): */
@@ -362,8 +370,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
+ mutex_lock(&drm->struct_mutex);
gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
if (IS_ERR(gpu->memptrs_bo)) {
ret = PTR_ERR(gpu->memptrs_bo);
gpu->memptrs_bo = NULL;
@@ -371,13 +381,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+ gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
if (!gpu->memptrs) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}
- ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+ ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
&gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index ae992c71703f..4eee0ec8f069 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -105,6 +105,7 @@ enum pc_di_index_size {
enum pc_di_vis_cull_mode {
IGNORE_VISIBILITY = 0,
+ USE_VISIBILITY = 1,
};
enum adreno_pm4_packet_type {
@@ -163,6 +164,11 @@ enum adreno_pm4_type3_packets {
CP_SET_BIN = 76,
CP_TEST_TWO_MEMS = 113,
CP_WAIT_FOR_ME = 19,
+ CP_SET_DRAW_STATE = 67,
+ CP_DRAW_INDX_OFFSET = 56,
+ CP_DRAW_INDIRECT = 40,
+ CP_DRAW_INDX_INDIRECT = 41,
+ CP_DRAW_AUTO = 36,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -232,6 +238,211 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
}
+#define REG_CP_DRAW_INDX_0 0x00000000
+#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_1 0x00000001
+#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_0 0x00000000
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_1 0x00000001
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_2 0x00000002
+#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000700
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK;
+}
+
+#define REG_CP_SET_DRAW_STATE_0 0x00000000
+#define CP_SET_DRAW_STATE_0_COUNT__MASK 0x0000ffff
+#define CP_SET_DRAW_STATE_0_COUNT__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE_0_COUNT(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE_0_COUNT__SHIFT) & CP_SET_DRAW_STATE_0_COUNT__MASK;
+}
+#define CP_SET_DRAW_STATE_0_DIRTY 0x00010000
+#define CP_SET_DRAW_STATE_0_DISABLE 0x00020000
+#define CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS 0x00040000
+#define CP_SET_DRAW_STATE_0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE_0_GROUP_ID__MASK 0x1f000000
+#define CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT 24
+static inline uint32_t CP_SET_DRAW_STATE_0_GROUP_ID(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE_0_GROUP_ID__MASK;
+}
+
+#define REG_CP_SET_DRAW_STATE_1 0x00000001
+#define CP_SET_DRAW_STATE_1_ADDR__MASK 0xffffffff
+#define CP_SET_DRAW_STATE_1_ADDR__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE_1_ADDR(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE_1_ADDR__SHIFT) & CP_SET_DRAW_STATE_1_ADDR__MASK;
+}
+
#define REG_CP_SET_BIN_0 0x00000000
#define REG_CP_SET_BIN_1 0x00000001
@@ -262,5 +473,21 @@ static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
}
+#define REG_CP_SET_BIN_DATA_0 0x00000000
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA_1 0x00000001
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 87be647e3825..0f1f5b9459a5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 747a6ef4211f..d468f86f637c 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 48e03acf19bf..da8740054cdf 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 7f7aadef8a82..a125a7e32742 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -123,7 +123,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
for (i = 0; i < config->hpd_reg_cnt; i++) {
struct regulator *reg;
- reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
+ reg = devm_regulator_get_exclusive(&pdev->dev,
+ config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
@@ -138,7 +139,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
for (i = 0; i < config->pwr_reg_cnt; i++) {
struct regulator *reg;
- reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
+ reg = devm_regulator_get_exclusive(&pdev->dev,
+ config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
@@ -266,37 +268,56 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
int gpio = of_get_named_gpio(of_node, name, 0);
if (gpio < 0) {
- dev_err(dev, "failed to get gpio: %s (%d)\n",
- name, gpio);
- gpio = -1;
+ char name2[32];
+ snprintf(name2, sizeof(name2), "%s-gpio", name);
+ gpio = of_get_named_gpio(of_node, name2, 0);
+ if (gpio < 0) {
+ dev_err(dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
}
return gpio;
}
- /* TODO actually use DT.. */
- static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
- static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
- static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
- static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
- static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+ if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
+ static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
+ static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
+ static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+ static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
+ static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+ config.phy_init = hdmi_phy_8x74_init;
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.pwr_reg_names = pwr_reg_names;
+ config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_freq = hpd_clk_freq;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ config.pwr_clk_names = pwr_clk_names;
+ config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
+ config.shared_irq = true;
+ } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
+ static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
+ static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
+ config.phy_init = hdmi_phy_8960_init;
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8660")) {
+ config.phy_init = hdmi_phy_8x60_init;
+ } else {
+ dev_err(dev, "unknown phy: %s\n", of_node->name);
+ }
- config.phy_init = hdmi_phy_8x74_init;
config.mmio_name = "core_physical";
- config.hpd_reg_names = hpd_reg_names;
- config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
- config.pwr_reg_names = pwr_reg_names;
- config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
- config.hpd_clk_names = hpd_clk_names;
- config.hpd_freq = hpd_clk_freq;
- config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
- config.pwr_clk_names = pwr_clk_names;
- config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
- config.shared_irq = true;
+ config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm");
#else
static const char *hpd_clk_names[] = {
@@ -373,7 +394,9 @@ static int hdmi_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,hdmi-tx" },
+ { .compatible = "qcom,hdmi-tx-8074" },
+ { .compatible = "qcom,hdmi-tx-8960" },
+ { .compatible = "qcom,hdmi-tx-8660" },
{}
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 9d7723c6528a..b981995410b5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -96,6 +96,7 @@ struct hdmi_platform_config {
/* gpio's: */
int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+ int mux_lpm_gpio;
/* older devices had their own irq, mdp5+ it is shared w/ mdp: */
bool shared_irq;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e2636582cfd7..e89fe053d375 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -148,9 +148,9 @@ static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*
static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
-static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
-static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR_0(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
#define HDMI_ACR_0_CTS__MASK 0xfffff000
#define HDMI_ACR_0_CTS__SHIFT 12
static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
@@ -158,7 +158,7 @@ static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
}
-static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR_1(enum hdmi_acr_cts i0) { return 0x000000c8 + 0x8*i0; }
#define HDMI_ACR_1_N__MASK 0xffffffff
#define HDMI_ACR_1_N__SHIFT 0
static inline uint32_t HDMI_ACR_1_N(uint32_t val)
@@ -552,6 +552,103 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
#define REG_HDMI_8960_PHY_REG11 0x0000042c
#define REG_HDMI_8960_PHY_REG12 0x00000430
+#define HDMI_8960_PHY_REG12_SW_RESET 0x00000020
+#define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080
+
+#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000434
+
+#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000438
+
+#define REG_HDMI_8960_PHY_REG_MISC0 0x0000043c
+
+#define REG_HDMI_8960_PHY_REG13 0x00000440
+
+#define REG_HDMI_8960_PHY_REG14 0x00000444
+
+#define REG_HDMI_8960_PHY_REG15 0x00000448
+
+#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000500
+
+#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000504
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000508
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000050c
+
+#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000510
+
+#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000514
+
+#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000518
+#define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002
+#define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000051c
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000520
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000524
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000528
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000052c
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000530
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000534
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000538
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000053c
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000540
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000544
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000548
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000054c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000550
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000554
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000558
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000055c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000560
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000564
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000568
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000056c
+
+#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000570
+
+#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000574
+
+#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000578
+
+#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000057c
+
+#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000580
+
+#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000584
+
+#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000588
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000058c
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000590
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000594
+
+#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000598
+#define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001
+
+#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000059c
#define REG_HDMI_8x74_ANA_CFG0 0x00000000
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 28f7e3ec6c28..4aca2a3c667c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -63,7 +63,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_MUX_SEL", config->mux_en_gpio, ret);
+ "HDMI_MUX_EN", config->mux_en_gpio, ret);
goto error4;
}
gpio_set_value_cansleep(config->mux_en_gpio, 1);
@@ -78,6 +78,19 @@ static int gpio_config(struct hdmi *hdmi, bool on)
}
gpio_set_value_cansleep(config->mux_sel_gpio, 0);
}
+
+ if (config->mux_lpm_gpio != -1) {
+ ret = gpio_request(config->mux_lpm_gpio,
+ "HDMI_MUX_LPM");
+ if (ret) {
+ dev_err(dev->dev,
+ "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_LPM",
+ config->mux_lpm_gpio, ret);
+ goto error6;
+ }
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+ }
DBG("gpio on");
} else {
gpio_free(config->ddc_clk_gpio);
@@ -93,11 +106,19 @@ static int gpio_config(struct hdmi *hdmi, bool on)
gpio_set_value_cansleep(config->mux_sel_gpio, 1);
gpio_free(config->mux_sel_gpio);
}
+
+ if (config->mux_lpm_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+ gpio_free(config->mux_lpm_gpio);
+ }
DBG("gpio off");
}
return 0;
+error6:
+ if (config->mux_sel_gpio != -1)
+ gpio_free(config->mux_sel_gpio);
error5:
if (config->mux_en_gpio != -1)
gpio_free(config->mux_en_gpio);
@@ -306,7 +327,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
hdp_disable(hdmi_connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
hdmi_unreference(hdmi_connector->hdmi);
@@ -416,7 +437,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
ret = hpd_enable(hdmi_connector);
if (ret) {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index e5b7ed5b8f01..902d7685d441 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,13 +15,370 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
#include "hdmi.h"
struct hdmi_phy_8960 {
struct hdmi_phy base;
struct hdmi *hdmi;
+ struct clk_hw pll_hw;
+ struct clk *pll;
+ unsigned long pixclk;
};
#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
+
+/*
+ * HDMI PLL:
+ *
+ * To get the parent clock set up properly, we need to plug the hdmi pll
+ * configuration into the common clock framework.
+ */
+
+struct pll_rate {
+ unsigned long rate;
+ struct {
+ uint32_t val;
+ uint32_t reg;
+ } conf[32];
+};
+
+/* NOTE: keep sorted highest freq to lowest: */
+static const struct pll_rate freqtbl[] = {
+ /* 1080p60/1080p50 case */
+ { 148500000, {
+ { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+ { 108000000, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
+ /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
+ { 74250000, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0, 0 } }
+ },
+ { 65000000, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
+ /* 480p60/480i60 */
+ { 27030000, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+ /* 576p50/576i50 */
+ { 27000000, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+ /* 640x480p60 */
+ { 25200000, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+};
+
+static int hdmi_pll_enable(struct clk_hw *hw)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ int timeout_count, pll_lock_retry = 10;
+ unsigned int val;
+
+ DBG("");
+
+ /* Assert PLL S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
+
+ /* Wait for a short time before de-asserting
+ * to allow the hardware to complete its job.
+ * This delay should be long enough for the
+ * hardware to assert and de-assert.
+ */
+ udelay(10);
+
+ /* De-assert PLL S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_SW_RESET;
+ /* Assert PHY S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+ val &= ~HDMI_8960_PHY_REG12_SW_RESET;
+ /* Wait for a short time before de-asserting
+ * to allow the hardware to complete its job.
+ * This delay should be long enough for the
+ * hardware to assert and de-assert. */
+ udelay(10);
+ /* De-assert PHY S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x3f);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+ /* Wait 10 us after enabling global power for the PHY */
+ mb();
+ udelay(10);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
+ val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80);
+
+ timeout_count = 1000;
+ while (--pll_lock_retry > 0) {
+
+ /* are we there yet? */
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0);
+ if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
+ break;
+
+ udelay(1);
+
+ if (--timeout_count > 0)
+ continue;
+
+ /*
+ * PLL has still not locked.
+ * Do a software reset and try again;
+ * assert the PLL S/W reset first.
+ */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ udelay(10);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ /*
+ * Wait for a short duration for the PLL calibration
+ * before checking if the PLL gets locked
+ */
+ udelay(350);
+
+ timeout_count = 1000;
+ }
+
+ return 0;
+}
+
+static void hdmi_pll_disable(struct clk_hw *hw)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ unsigned int val;
+
+ DBG("");
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
+ val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ val |= HDMI_8960_PHY_REG12_SW_RESET;
+ val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ /* Make sure HDMI PHY/PLL are powered down */
+ mb();
+}
+
+static const struct pll_rate *find_rate(unsigned long rate)
+{
+ int i;
+ for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
+ if (rate > freqtbl[i].rate)
+ return &freqtbl[i-1];
+ return &freqtbl[i-1];
+}
+
+static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ return phy_8960->pixclk;
+}
+
+static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pll_rate *pll_rate = find_rate(rate);
+ return pll_rate->rate;
+}
+
+static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ const struct pll_rate *pll_rate = find_rate(rate);
+ int i;
+
+ DBG("rate=%lu", rate);
+
+ for (i = 0; pll_rate->conf[i].reg; i++)
+ hdmi_write(hdmi, pll_rate->conf[i].reg, pll_rate->conf[i].val);
+
+ phy_8960->pixclk = rate;
+
+ return 0;
+}
+
+
+static const struct clk_ops hdmi_pll_ops = {
+ .enable = hdmi_pll_enable,
+ .disable = hdmi_pll_disable,
+ .recalc_rate = hdmi_pll_recalc_rate,
+ .round_rate = hdmi_pll_round_rate,
+ .set_rate = hdmi_pll_set_rate,
+};
+
+static const char *hdmi_pll_parents[] = {
+ "pxo",
+};
+
+static struct clk_init_data pll_init = {
+ .name = "hdmi_pll",
+ .ops = &hdmi_pll_ops,
+ .parent_names = hdmi_pll_parents,
+ .num_parents = ARRAY_SIZE(hdmi_pll_parents),
+};
+
+
+/*
+ * HDMI Phy:
+ */
static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
{
@@ -86,6 +443,9 @@ static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
struct hdmi *hdmi = phy_8960->hdmi;
+ DBG("pixclock: %lu", pixclock);
+
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x00);
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
@@ -104,6 +464,8 @@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
struct hdmi *hdmi = phy_8960->hdmi;
+ DBG("");
+
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
}
@@ -118,7 +480,12 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
{
struct hdmi_phy_8960 *phy_8960;
struct hdmi_phy *phy = NULL;
- int ret;
+ int ret, i;
+
+ /* sanity check: */
+ for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
+ if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
+ return ERR_PTR(-EINVAL);
phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
if (!phy_8960) {
@@ -132,6 +499,14 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
phy_8960->hdmi = hdmi;
+ phy_8960->pll_hw.init = &pll_init;
+ phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
+ if (IS_ERR(phy_8960->pll)) {
+ ret = PTR_ERR(phy_8960->pll);
+ phy_8960->pll = NULL;
+ goto fail;
+ }
+
return phy;
fail:
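
For readers unfamiliar with the pattern used in hdmi_phy_8960.c above: the PLL is exposed to the common clock framework by embedding a struct clk_hw in the driver struct, pointing it at a clk_init_data, and registering it with devm_clk_register(). The sketch below is a minimal, hypothetical illustration of that registration pattern only; names such as my_pll, my_pll_enable and my_pll_register are illustrative and are not part of the patch, and it uses only APIs the patch itself already calls (clk_ops, clk_init_data, devm_clk_register).

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>

struct my_pll {
	struct clk_hw hw;	/* embedded so the clk callbacks can recover the driver struct */
	unsigned long rate;
};

#define hw_to_my_pll(x) container_of(x, struct my_pll, hw)

static int my_pll_enable(struct clk_hw *hw)
{
	/* program PLL registers here, as hdmi_pll_enable() does above */
	return 0;
}

static unsigned long my_pll_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	/* report the last rate programmed into the hardware */
	return hw_to_my_pll(hw)->rate;
}

static const struct clk_ops my_pll_ops = {
	.enable = my_pll_enable,
	.recalc_rate = my_pll_recalc_rate,
};

static const char *my_pll_parents[] = { "pxo" };

static struct clk_init_data my_pll_init = {
	.name = "my_pll",
	.ops = &my_pll_ops,
	.parent_names = my_pll_parents,
	.num_parents = ARRAY_SIZE(my_pll_parents),
};

/* called from the driver's init path with a struct device in hand */
static struct clk *my_pll_register(struct device *dev)
{
	struct my_pll *pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);

	if (!pll)
		return ERR_PTR(-ENOMEM);
	pll->hw.init = &my_pll_init;
	/* register the clock so clk_set_rate()/clk_prepare_enable() can drive it */
	return devm_clk_register(dev, &pll->hw);
}

The container_of() accessor mirrors the clk_to_phy() macro in the patch: because clk_hw is embedded rather than allocated separately, each clock callback can reach the surrounding driver state without any global lookup.
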
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index d591567173c4..bd81db6a7829 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 416a26e1e58d..122208e8a2ee 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 0bb4faa17523..733646c0d3f8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -147,7 +147,7 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
if (mdp4_kms->blank_cursor_bo)
- drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
kfree(mdp4_kms);
}
@@ -176,6 +176,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
if (mdp4_kms->pclk)
clk_disable_unprepare(mdp4_kms->pclk);
clk_disable_unprepare(mdp4_kms->lut_clk);
+ if (mdp4_kms->axi_clk)
+ clk_disable_unprepare(mdp4_kms->axi_clk);
return 0;
}
@@ -188,6 +190,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
if (mdp4_kms->pclk)
clk_prepare_enable(mdp4_kms->pclk);
clk_prepare_enable(mdp4_kms->lut_clk);
+ if (mdp4_kms->axi_clk)
+ clk_prepare_enable(mdp4_kms->axi_clk);
return 0;
}
@@ -294,15 +298,17 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
+ mdp4_kms->dsi_pll_vdda =
+ devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda");
if (IS_ERR(mdp4_kms->dsi_pll_vdda))
mdp4_kms->dsi_pll_vdda = NULL;
- mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
+ mdp4_kms->dsi_pll_vddio =
+ devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio");
if (IS_ERR(mdp4_kms->dsi_pll_vddio))
mdp4_kms->dsi_pll_vddio = NULL;
- mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(mdp4_kms->vdd))
mdp4_kms->vdd = NULL;
@@ -333,6 +339,13 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
+ mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
+ if (IS_ERR(mdp4_kms->axi_clk)) {
+ dev_err(dev->dev, "failed to get axi_clk\n");
+ ret = PTR_ERR(mdp4_kms->axi_clk);
+ goto fail;
+ }
+
clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
@@ -348,7 +361,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(dev, config->iommu);
+ mmu = msm_iommu_new(&pdev->dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
goto fail;
@@ -406,6 +419,8 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
static struct mdp4_platform_config config = {};
#ifdef CONFIG_OF
/* TODO */
+ config.max_clk = 266667000;
+ config.iommu = iommu_domain_alloc(&platform_bus_type);
#else
if (cpu_is_apq8064())
config.max_clk = 266667000;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 715520c54cde..3225da804c61 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -42,6 +42,7 @@ struct mdp4_kms {
struct clk *clk;
struct clk *pclk;
struct clk *lut_clk;
+ struct clk *axi_clk;
struct mdp_irq error_handler;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 0aa51517f826..67f4f896ba8c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -68,6 +68,8 @@ enum mdp5_pipe {
SSPP_RGB2 = 5,
SSPP_DMA0 = 6,
SSPP_DMA1 = 7,
+ SSPP_VIG3 = 8,
+ SSPP_RGB3 = 9,
};
enum mdp5_ctl_mode {
@@ -126,7 +128,11 @@ enum mdp5_client_id {
CID_RGB0 = 16,
CID_RGB1 = 17,
CID_RGB2 = 18,
- CID_MAX = 19,
+ CID_VIG3_Y = 19,
+ CID_VIG3_CR = 20,
+ CID_VIG3_CB = 21,
+ CID_RGB3 = 22,
+ CID_MAX = 23,
};
enum mdp5_igc_type {
@@ -299,11 +305,34 @@ static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
-static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; }
+static inline uint32_t __offset_CTL(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ctl.base[0]);
+ case 1: return (mdp5_cfg->ctl.base[1]);
+ case 2: return (mdp5_cfg->ctl.base[2]);
+ case 3: return (mdp5_cfg->ctl.base[3]);
+ case 4: return (mdp5_cfg->ctl.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); }
-static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+static inline uint32_t __offset_LAYER(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000000;
+ case 1: return 0x00000004;
+ case 2: return 0x00000008;
+ case 3: return 0x0000000c;
+ case 4: return 0x00000010;
+ case 5: return 0x00000024;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
-static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
@@ -354,8 +383,20 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
}
#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
+#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
+#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
+#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
+}
-static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); }
#define MDP5_CTL_OP_MODE__MASK 0x0000000f
#define MDP5_CTL_OP_MODE__SHIFT 0
static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
@@ -377,7 +418,7 @@ static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
}
-static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __offset_CTL(i0); }
#define MDP5_CTL_FLUSH_VIG0 0x00000001
#define MDP5_CTL_FLUSH_VIG1 0x00000002
#define MDP5_CTL_FLUSH_VIG2 0x00000004
@@ -387,26 +428,48 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x1
#define MDP5_CTL_FLUSH_LM0 0x00000040
#define MDP5_CTL_FLUSH_LM1 0x00000080
#define MDP5_CTL_FLUSH_LM2 0x00000100
+#define MDP5_CTL_FLUSH_LM3 0x00000200
+#define MDP5_CTL_FLUSH_LM4 0x00000400
#define MDP5_CTL_FLUSH_DMA0 0x00000800
#define MDP5_CTL_FLUSH_DMA1 0x00001000
#define MDP5_CTL_FLUSH_DSPP0 0x00002000
#define MDP5_CTL_FLUSH_DSPP1 0x00004000
#define MDP5_CTL_FLUSH_DSPP2 0x00008000
#define MDP5_CTL_FLUSH_CTL 0x00020000
+#define MDP5_CTL_FLUSH_VIG3 0x00040000
+#define MDP5_CTL_FLUSH_RGB3 0x00080000
+#define MDP5_CTL_FLUSH_LM5 0x00100000
+#define MDP5_CTL_FLUSH_DSPP3 0x00200000
-static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
-static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
-static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
+{
+ switch (idx) {
+ case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
+ case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
+ case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
+ case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]);
+ case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]);
+ case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]);
+ case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]);
+ case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
+ case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
+ case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
@@ -420,7 +483,7 @@ static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
@@ -434,7 +497,7 @@ static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
@@ -448,7 +511,7 @@ static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); }
#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
@@ -462,7 +525,7 @@ static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); }
#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
@@ -476,15 +539,15 @@ static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
@@ -498,7 +561,7 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
@@ -512,9 +575,9 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
@@ -568,7 +631,7 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_ty
return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
@@ -594,7 +657,7 @@ static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
@@ -610,29 +673,29 @@ static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
-static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); }
#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
@@ -646,7 +709,7 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
@@ -686,23 +749,34 @@ static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_
return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+static inline uint32_t __offset_LM(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->lm.base[0]);
+ case 1: return (mdp5_cfg->lm.base[1]);
+ case 2: return (mdp5_cfg->lm.base[2]);
+ case 3: return (mdp5_cfg->lm.base[3]);
+ case 4: return (mdp5_cfg->lm.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
-static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
@@ -716,13 +790,13 @@ static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -744,57 +818,67 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+static inline uint32_t __offset_DSPP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->dspp.base[0]);
+ case 1: return (mdp5_cfg->dspp.base[1]);
+ case 2: return (mdp5_cfg->dspp.base[2]);
+ case 3: return (mdp5_cfg->dspp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
@@ -811,29 +895,40 @@ static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
-static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+static inline uint32_t __offset_INTF(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->intf.base[0]);
+ case 1: return (mdp5_cfg->intf.base[1]);
+ case 2: return (mdp5_cfg->intf.base[2]);
+ case 3: return (mdp5_cfg->intf.base[3]);
+ case 4: return (mdp5_cfg->intf.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); }
#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
@@ -847,23 +942,23 @@ static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
}
-static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); }
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
@@ -872,7 +967,7 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
}
#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); }
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
@@ -880,11 +975,11 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
}
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); }
#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
@@ -898,7 +993,7 @@ static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
}
-static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); }
#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
@@ -913,124 +1008,132 @@ static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
}
#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
-static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); }
#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
-static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+static inline uint32_t __offset_AD(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ad.base[0]);
+ case 1: return (mdp5_cfg->ad.base[1]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); }
#endif /* MDP5_XML */
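
Every hunk above follows the same conversion: a register helper that used to compute a fixed address of the form 0x0000xxxx + 0x400*i0 now adds a small per-register offset to a base looked up from the active hardware config through the mdp5_cfg pointer (via __offset_PIPE(), __offset_LM(), __offset_DSPP(), __offset_INTF(), __offset_AD()). The standalone userspace sketch below models that pattern; the struct shapes mirror the mdp5_sub_block/mdp5_config definitions added to mdp5_kms.h further down (reduced to one sub-block here), the VIG pipe bases are the msm8x74 values from this diff, and INVALID_IDX() is reduced to an abort because its real definition is not part of the patch.

/* Standalone model of the MDP5 dynamic register-offset pattern.
 * Not the kernel header itself: INVALID_IDX() is an illustrative
 * stand-in, and only the VIG pipe sub-block is modelled.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MDP5_MAX_BASES 8

struct mdp5_sub_block {
	int count;
	uint32_t base[MDP5_MAX_BASES];
};

struct mdp5_config {
	const char *name;
	struct mdp5_sub_block pipe_vig;
};

/* selected at probe time from the detected hardware revision */
static const struct mdp5_config *mdp5_cfg;

static const struct mdp5_config msm8x74_config = {
	.name = "msm8x74",
	.pipe_vig = { .count = 3, .base = { 0x01200, 0x01600, 0x01a00 } },
};

static uint32_t INVALID_IDX(uint32_t idx)
{
	fprintf(stderr, "invalid block index %u\n", idx);
	abort();
}

/* mirrors __offset_PIPE(): the per-instance base comes from the config */
static uint32_t __offset_PIPE(uint32_t idx)
{
	if (idx < (uint32_t)mdp5_cfg->pipe_vig.count)
		return mdp5_cfg->pipe_vig.base[idx];
	return INVALID_IDX(idx);
}

/* mirrors the REG_MDP5_PIPE_* helpers: fixed offset + runtime base lookup */
static uint32_t REG_MDP5_PIPE_SRC_XY(uint32_t pipe)
{
	return 0x00000008 + __offset_PIPE(pipe);
}

int main(void)
{
	mdp5_cfg = &msm8x74_config;
	/* VIG1 SRC_XY: 0x01600 + 0x8 = 0x1608 */
	printf("%s VIG1 SRC_XY at 0x%05x\n", mdp5_cfg->name,
	       (unsigned)REG_MDP5_PIPE_SRC_XY(1));
	return 0;
}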
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 71510ee26e96..31a2c6331a1d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -26,14 +26,98 @@ static const char *iommu_ports[] = {
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
-static int mdp5_hw_init(struct msm_kms *kms)
+const struct mdp5_config *mdp5_cfg;
+
+static const struct mdp5_config msm8x74_config = {
+ .name = "msm8x74",
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01200, 0x01600, 0x01a00 },
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01e00, 0x02200, 0x02600 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02a00, 0x02e00 },
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04600, 0x04a00, 0x04e00 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+ },
+ .intf = {
+ .count = 4,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+ },
+};
+
+static const struct mdp5_config apq8084_config = {
+ .name = "apq8084",
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x03200, 0x03600 },
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x13500, 0x13700, 0x13900 },
+ },
+ .intf = {
+ .count = 5,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+ },
+};
+
+struct mdp5_config_entry {
+ int revision;
+ const struct mdp5_config *config;
+};
+
+static const struct mdp5_config_entry mdp5_configs[] = {
+ { .revision = 0, .config = &msm8x74_config },
+ { .revision = 2, .config = &msm8x74_config },
+ { .revision = 3, .config = &apq8084_config },
+};
+
+static int mdp5_select_hw_cfg(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
uint32_t version, major, minor;
- int ret = 0;
-
- pm_runtime_get_sync(dev->dev);
+ int i, ret = 0;
mdp5_enable(mdp5_kms);
version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
@@ -44,8 +128,8 @@ static int mdp5_hw_init(struct msm_kms *kms)
DBG("found MDP5 version v%d.%d", major, minor);
- if ((major != 1) || ((minor != 0) && (minor != 2))) {
- dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ if (major != 1) {
+ dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
@@ -53,6 +137,35 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_kms->rev = minor;
+	/* the mdp5_cfg global pointer must be initialized before the hw can be accessed */
+ for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
+ if (mdp5_configs[i].revision != minor)
+ continue;
+ mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
+ break;
+ }
+ if (unlikely(!mdp5_kms->hw_cfg)) {
+ dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
+
+ return 0;
+out:
+ return ret;
+}
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct drm_device *dev = mdp5_kms->dev;
+ int i;
+
+ pm_runtime_get_sync(dev->dev);
+
/* Magic unknown register writes:
*
* W VBIF:0x004 00000001 (mdss_mdp.c:839)
@@ -78,15 +191,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
*/
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
-out:
+ for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
+
pm_runtime_put_sync(dev->dev);
- return ret;
+ return 0;
}
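
mdp5_select_hw_cfg() replaces the old "minor != 0 && minor != 2" check with a revision-to-config table, and mdp5_hw_init() then drives its CTL_OP reset loop off hw_cfg->ctl.count instead of writing indices 0..3 by hand. Below is a compact userspace model of that select-then-iterate flow; the revision numbers (0 and 2 for msm8x74, 3 for apq8084) and the CTL count of 5 come from the tables above, the rest is an illustrative stand-in.

/* Model of "pick a config by hw revision, then drive loops off its counts". */
#include <stddef.h>
#include <stdio.h>

struct cfg {
	const char *name;
	int ctl_count;
};

static const struct cfg msm8x74 = { "msm8x74", 5 };
static const struct cfg apq8084 = { "apq8084", 5 };

static const struct {
	int revision;
	const struct cfg *config;
} configs[] = {
	{ 0, &msm8x74 },
	{ 2, &msm8x74 },
	{ 3, &apq8084 },
};

static const struct cfg *select_hw_cfg(int major, int minor)
{
	size_t i;

	if (major != 1)
		return NULL;		/* unexpected major version */
	for (i = 0; i < sizeof(configs) / sizeof(configs[0]); i++)
		if (configs[i].revision == minor)
			return configs[i].config;
	return NULL;			/* unknown minor revision */
}

int main(void)
{
	const struct cfg *hw_cfg = select_hw_cfg(1, 3);
	int i;

	if (!hw_cfg)
		return 1;
	printf("selected %s\n", hw_cfg->name);
	/* the count replaces the hard-coded CTL_OP(0..3) writes */
	for (i = 0; i < hw_cfg->ctl_count; i++)
		printf("write CTL_OP(%d) = 0\n", i);
	return 0;
}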
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -161,7 +272,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
static const enum mdp5_pipe crtcs[] = {
- SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -169,7 +280,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
int i, ret;
/* construct CRTCs: */
- for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
+ for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
struct drm_crtc *crtc;
@@ -246,7 +357,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- int ret;
+ int i, ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
if (!mdp5_kms) {
@@ -307,20 +418,22 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+ ret = mdp5_select_hw_cfg(kms);
+ if (ret)
+ goto fail;
+
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
+ for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
mdp5_disable(mdp5_kms);
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(dev, config->iommu);
+ mmu = msm_iommu_new(&pdev->dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -368,5 +481,11 @@ static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
#ifdef CONFIG_OF
/* TODO */
#endif
+ config.iommu = iommu_domain_alloc(&platform_bus_type);
+ /* TODO hard-coded in downstream mdss, but should it be? */
+ config.max_clk = 200000000;
+ /* TODO get from DT: */
+ config.smp_blk_cnt = 22;
+
return &config;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 6e981b692d1d..5bf340dd0f00 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,6 +21,24 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
+/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
+#define MDP5_MAX_BASES 8
+struct mdp5_sub_block {
+ int count;
+ uint32_t base[MDP5_MAX_BASES];
+};
+struct mdp5_config {
+ char *name;
+ struct mdp5_sub_block ctl;
+ struct mdp5_sub_block pipe_vig;
+ struct mdp5_sub_block pipe_rgb;
+ struct mdp5_sub_block pipe_dma;
+ struct mdp5_sub_block lm;
+ struct mdp5_sub_block dspp;
+ struct mdp5_sub_block ad;
+ struct mdp5_sub_block intf;
+};
+extern const struct mdp5_config *mdp5_cfg;
#include "mdp5.xml.h"
#include "mdp5_smp.h"
@@ -30,6 +48,7 @@ struct mdp5_kms {
struct drm_device *dev;
int rev;
+ const struct mdp5_config *hw_cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
@@ -82,6 +101,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
NAME(VIG0), NAME(VIG1), NAME(VIG2),
NAME(RGB0), NAME(RGB1), NAME(RGB2),
NAME(DMA0), NAME(DMA1),
+ NAME(VIG3), NAME(RGB3),
#undef NAME
};
return names[pipe];
@@ -98,6 +118,8 @@ static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
default: return 0;
}
}
@@ -108,6 +130,7 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
case SSPP_RGB0:
case SSPP_RGB1:
case SSPP_RGB2:
+ case SSPP_RGB3:
return 1;
default:
return 3;
@@ -126,6 +149,8 @@ static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
case SSPP_RGB2: return CID_RGB2;
case SSPP_DMA0: return CID_DMA0_Y + plane;
case SSPP_DMA1: return CID_DMA1_Y + plane;
+ case SSPP_VIG3: return CID_VIG3_Y + plane;
+ case SSPP_RGB3: return CID_RGB3;
default: return CID_UNUSED;
}
}
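
The pipe2flush(), pipe2nclients() and pipe2client() helpers gain SSPP_VIG3/SSPP_RGB3 cases so the extra apq8084 pipes take part in flush and SMP-client bookkeeping. The sketch below shows how a per-pipe flush helper of this shape is typically combined into a single CTL flush mask; the bit positions are invented for illustration, since the MDP5_CTL_FLUSH_* values are not part of this diff.

/* Illustrative model of OR-ing per-pipe flush bits into one CTL flush mask.
 * Bit assignments here are made up; only the pattern mirrors pipe2flush().
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum pipe { RGB0, RGB1, RGB2, RGB3, NPIPES };

static uint32_t pipe2flush(enum pipe p)
{
	switch (p) {
	case RGB0: return 1u << 3;	/* hypothetical bit positions */
	case RGB1: return 1u << 4;
	case RGB2: return 1u << 5;
	case RGB3: return 1u << 19;
	default:   return 0;		/* unknown pipe: nothing to flush */
	}
}

int main(void)
{
	enum pipe used[] = { RGB0, RGB3 };
	uint32_t flush = 0;
	size_t i;

	for (i = 0; i < sizeof(used) / sizeof(used[0]); i++)
		flush |= pipe2flush(used[i]);
	printf("CTL flush mask: 0x%08x\n", (unsigned)flush);
	return 0;
}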
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index a9629b85b983..64c1afd6030a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9a5d87db5c23..b447c01ad89c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -181,7 +181,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
struct msm_kms *kms;
int ret;
-
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(dev->dev, "failed to allocate private data\n");
@@ -314,13 +313,15 @@ fail:
static void load_gpu(struct drm_device *dev)
{
+ static DEFINE_MUTEX(init_lock);
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu;
+ mutex_lock(&init_lock);
+
if (priv->gpu)
- return;
+ goto out;
- mutex_lock(&dev->struct_mutex);
gpu = a3xx_gpu_init(dev);
if (IS_ERR(gpu)) {
dev_warn(dev->dev, "failed to load a3xx gpu\n");
@@ -330,7 +331,9 @@ static void load_gpu(struct drm_device *dev)
if (gpu) {
int ret;
+ mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
+ mutex_unlock(&dev->struct_mutex);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -340,12 +343,12 @@ static void load_gpu(struct drm_device *dev)
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
-
}
priv->gpu = gpu;
- mutex_unlock(&dev->struct_mutex);
+out:
+ mutex_unlock(&init_lock);
}
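
load_gpu() now serializes one-time GPU bring-up with its own function-local init_lock instead of piggy-backing on dev->struct_mutex, which is only taken around the pm_resume() call that actually needs it. Below is a userspace pthread model of that check-inside-the-lock, initialize-once shape, with the GPU reduced to a placeholder object; build with -pthread.

/* Model of load_gpu()'s one-time init guarded by a function-local mutex. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct gpu { int id; };

static struct gpu *the_gpu;	/* plays the role of priv->gpu */

static void load_gpu(void)
{
	static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&init_lock);

	if (the_gpu)		/* already initialized by an earlier caller */
		goto out;

	the_gpu = calloc(1, sizeof(*the_gpu));
	if (the_gpu)
		the_gpu->id = 42;	/* stands in for the real gpu init */
out:
	pthread_mutex_unlock(&init_lock);
}

static void *opener(void *arg)
{
	(void)arg;
	load_gpu();		/* every open path may race into here */
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, opener, NULL);
	pthread_create(&t2, NULL, opener, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("gpu id = %d (initialized exactly once)\n",
	       the_gpu ? the_gpu->id : -1);
	return 0;
}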
static int msm_open(struct drm_device *dev, struct drm_file *file)
@@ -906,25 +909,22 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
-static int msm_drm_add_components(struct device *master, struct master *m)
+static int add_components(struct device *dev, struct component_match **matchptr,
+ const char *name)
{
- struct device_node *np = master->of_node;
+ struct device_node *np = dev->of_node;
unsigned i;
- int ret;
for (i = 0; ; i++) {
struct device_node *node;
- node = of_parse_phandle(np, "connectors", i);
+ node = of_parse_phandle(np, name, i);
if (!node)
break;
- ret = component_master_add_child(m, compare_of, node);
- of_node_put(node);
-
- if (ret)
- return ret;
+ component_match_add(dev, matchptr, compare_of, node);
}
+
return 0;
}
#else
@@ -932,9 +932,34 @@ static int compare_dev(struct device *dev, void *data)
{
return dev == data;
}
+#endif
+
+static int msm_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&msm_driver, to_platform_device(dev));
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops msm_drm_ops = {
+ .bind = msm_drm_bind,
+ .unbind = msm_drm_unbind,
+};
+
+/*
+ * Platform driver:
+ */
-static int msm_drm_add_components(struct device *master, struct master *m)
+static int msm_pdev_probe(struct platform_device *pdev)
{
+ struct component_match *match = NULL;
+#ifdef CONFIG_OF
+ add_components(&pdev->dev, &match, "connectors");
+ add_components(&pdev->dev, &match, "gpus");
+#else
/* For non-DT case, it kinda sucks. We don't actually have a way
* to know whether or not we are waiting for certain devices (or if
* they are simply not present). But for non-DT we only need to
@@ -958,41 +983,12 @@ static int msm_drm_add_components(struct device *master, struct master *m)
return -EPROBE_DEFER;
}
- ret = component_master_add_child(m, compare_dev, dev);
- if (ret) {
- DBG("could not add child: %d", ret);
- return ret;
- }
+ component_match_add(&pdev->dev, &match, compare_dev, dev);
}
-
- return 0;
-}
#endif
-static int msm_drm_bind(struct device *dev)
-{
- return drm_platform_init(&msm_driver, to_platform_device(dev));
-}
-
-static void msm_drm_unbind(struct device *dev)
-{
- drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
-}
-
-static const struct component_master_ops msm_drm_ops = {
- .add_components = msm_drm_add_components,
- .bind = msm_drm_bind,
- .unbind = msm_drm_unbind,
-};
-
-/*
- * Platform driver:
- */
-
-static int msm_pdev_probe(struct platform_device *pdev)
-{
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- return component_master_add(&pdev->dev, &msm_drm_ops);
+ return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
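
msm_pdev_probe() moves from the removed add_components master callback to building a component_match list up front (one component_match_add() per "connectors"/"gpus" node or sub-device) and handing it to component_master_add_with_match(), so the DRM master binds only once every matched child driver has probed. Below is a small userspace model of that collect-matches-then-bind-when-complete idea; the functions are simplified stand-ins, not the kernel component API.

/* Model of the componentized bind: register expected children up front,
 * then "bind" the master only once all of them have shown up.
 * Names and the match list are illustrative stand-ins for OF nodes/devices.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_MATCH 8

struct match_list {
	const char *want[MAX_MATCH];
	bool present[MAX_MATCH];
	int count;
};

static void match_add(struct match_list *m, const char *name)
{
	if (m->count < MAX_MATCH)
		m->want[m->count++] = name;
}

static void component_probed(struct match_list *m, const char *name)
{
	for (int i = 0; i < m->count; i++)
		if (!strcmp(m->want[i], name))
			m->present[i] = true;
}

static bool try_bind_master(const struct match_list *m)
{
	for (int i = 0; i < m->count; i++)
		if (!m->present[i])
			return false;	/* still waiting, defer the bind */
	printf("all %d components present: binding master\n", m->count);
	return true;
}

int main(void)
{
	struct match_list m = { 0 };

	match_add(&m, "hdmi");	/* e.g. a "connectors" phandle */
	match_add(&m, "gpu");	/* e.g. a "gpus" phandle */

	component_probed(&m, "hdmi");
	printf("bind now? %s\n", try_bind_master(&m) ? "yes" : "no");

	component_probed(&m, "gpu");
	printf("bind now? %s\n", try_bind_master(&m) ? "yes" : "no");
	return 0;
}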
static int msm_pdev_remove(struct platform_device *pdev)
@@ -1008,7 +1004,8 @@ static const struct platform_device_id msm_id[] = {
};
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdss_mdp" },
+ { .compatible = "qcom,mdp" }, /* mdp4 */
+ { .compatible = "qcom,mdss_mdp" }, /* mdp5 */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 5107fc4826bc..9c5221ce391a 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -19,6 +19,11 @@
#include "drm_crtc.h"
#include "drm_fb_helper.h"
+#include "msm_gem.h"
+
+extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
/*
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
@@ -43,6 +48,7 @@ static struct fb_ops msm_fb_ops = {
.fb_fillrect = sys_fillrect,
.fb_copyarea = sys_copyarea,
.fb_imageblit = sys_imageblit,
+ .fb_mmap = msm_fbdev_mmap,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
@@ -51,6 +57,31 @@ static struct fb_ops msm_fb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
+static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
+ struct msm_fbdev *fbdev = to_msm_fbdev(helper);
+ struct drm_gem_object *drm_obj = fbdev->bo;
+ struct drm_device *dev = helper->dev;
+ int ret = 0;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret) {
+ pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
+ return ret;
+ }
+
+ return msm_gem_mmap_obj(drm_obj, vma);
+}
+
static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -104,8 +135,16 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- /* TODO implement our own fb_mmap so we don't need this: */
- msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+ /*
+	 * NOTE: if we could be guaranteed the ability to map the buffer
+	 * in a panic path (i.e. lock-safe, etc.), we could avoid pinning
+	 * the buffer now:
+ */
+ ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+ if (ret) {
+ dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+ goto fail;
+ }
fbi = framebuffer_alloc(0, dev->dev);
if (!fbi) {
@@ -177,7 +216,7 @@ static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
DBG("fbdev: get gamma");
}
-static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
.gamma_set = msm_crtc_fb_gamma_set,
.gamma_get = msm_crtc_fb_gamma_get,
.fb_probe = msm_fbdev_create,
@@ -189,7 +228,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_fbdev *fbdev = NULL;
struct drm_fb_helper *helper;
- int ret = 0;
+ int ret;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
@@ -197,7 +236,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
helper = &fbdev->base;
- helper->funcs = &msm_fb_helper_funcs;
+ drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper,
priv->num_crtcs, priv->num_connectors);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 690d7e7b6d1e..4b1b82adabde 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
int npages = obj->size >> PAGE_SHIFT;
if (iommu_present(&platform_bus_type))
- p = drm_gem_get_pages(obj, 0);
+ p = drm_gem_get_pages(obj);
else
p = get_pages_vram(obj, npages);
@@ -278,24 +278,23 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct drm_device *dev = obj->dev;
int ret = 0;
if (!msm_obj->domain[id].iova) {
struct msm_drm_private *priv = obj->dev->dev_private;
- struct msm_mmu *mmu = priv->mmus[id];
struct page **pages = get_pages(obj);
- if (!mmu) {
- dev_err(dev->dev, "null MMU pointer\n");
- return -EINVAL;
- }
-
if (IS_ERR(pages))
return PTR_ERR(pages);
if (iommu_present(&platform_bus_type)) {
- uint32_t offset = (uint32_t)mmap_offset(obj);
+ struct msm_mmu *mmu = priv->mmus[id];
+ uint32_t offset;
+
+ if (WARN_ON(!mmu))
+ return -EINVAL;
+
+ offset = (uint32_t)mmap_offset(obj);
ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
obj->size, IOMMU_READ | IOMMU_WRITE);
msm_obj->domain[id].iova = offset;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c6322197db8c..4a0dce587745 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -606,14 +606,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(drm, iommu);
+ gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
gpu->id = msm_register_mmu(drm, gpu->mmu);
+
/* Create ringbuffer: */
+ mutex_lock(&drm->struct_mutex);
gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+ mutex_unlock(&drm->struct_mutex);
if (IS_ERR(gpu->rb)) {
ret = PTR_ERR(gpu->rb);
gpu->rb = NULL;
@@ -621,13 +624,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
- if (ret) {
- gpu->rb_iova = 0;
- dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
- goto fail;
- }
-
bs_init(gpu);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4b2ad9181edf..099af483fdf0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -33,39 +33,14 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
- struct drm_device *dev = mmu->dev;
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int i, ret;
-
- for (i = 0; i < cnt; i++) {
- struct device *msm_iommu_get_ctx(const char *ctx_name);
- struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_warn(dev->dev, "couldn't get %s context", names[i]);
- continue;
- }
- ret = iommu_attach_device(iommu->domain, ctx);
- if (ret) {
- dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
- return ret;
- }
- }
-
- return 0;
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int i;
-
- for (i = 0; i < cnt; i++) {
- struct device *msm_iommu_get_ctx(const char *ctx_name);
- struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (IS_ERR_OR_NULL(ctx))
- continue;
- iommu_detach_device(iommu->domain, ctx);
- }
+ iommu_detach_device(iommu->domain, mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
@@ -149,7 +124,7 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
struct msm_iommu *iommu;
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 21da6d154f71..7cd88d9dc155 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -32,17 +32,17 @@ struct msm_mmu_funcs {
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
- struct drm_device *dev;
+ struct device *dev;
};
-static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
const struct msm_mmu_funcs *funcs)
{
mmu->dev = dev;
mmu->funcs = funcs;
}
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
#endif /* __MSM_MMU_H__ */
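
The msm_mmu hunks above keep the driver's small ops-table abstraction intact and only swap the stored pointer from struct drm_device to the raw struct device, so the IOMMU backend can attach directly to the GPU's device. As a reading aid, here is a minimal userspace model of that layout; every name in it (fake_dev, fake_mmu, and so on) is invented for illustration and is not part of the kernel or msm API:

#include <stdio.h>

struct fake_dev { const char *name; };

struct fake_mmu;
struct fake_mmu_funcs {
	int  (*attach)(struct fake_mmu *mmu);
	void (*detach)(struct fake_mmu *mmu);
};

struct fake_mmu {
	const struct fake_mmu_funcs *funcs;
	struct fake_dev *dev;           /* plain device pointer, as in the patch */
};

static void fake_mmu_init(struct fake_mmu *mmu, struct fake_dev *dev,
			  const struct fake_mmu_funcs *funcs)
{
	mmu->dev = dev;
	mmu->funcs = funcs;
}

static int fake_attach(struct fake_mmu *mmu)
{
	printf("attach to %s\n", mmu->dev->name);
	return 0;
}

static void fake_detach(struct fake_mmu *mmu)
{
	printf("detach from %s\n", mmu->dev->name);
}

static const struct fake_mmu_funcs fake_funcs = {
	.attach = fake_attach,
	.detach = fake_detach,
};

int main(void)
{
	struct fake_dev dev = { .name = "gpu" };
	struct fake_mmu mmu;

	fake_mmu_init(&mmu, &dev, &fake_funcs);
	mmu.funcs->attach(&mmu);
	mmu.funcs->detach(&mmu);
	return 0;
}
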
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 637c29a33127..40afc69a3778 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,5 +1,5 @@
config DRM_NOUVEAU
- tristate "Nouveau (nVidia) cards"
+ tristate "Nouveau (NVIDIA) cards"
depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
@@ -23,7 +23,15 @@ config DRM_NOUVEAU
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
help
- Choose this option for open-source nVidia support.
+ Choose this option for open-source NVIDIA support.
+
+config NOUVEAU_PLATFORM_DRIVER
+ tristate "Nouveau (NVIDIA) SoC GPUs"
+ depends on DRM_NOUVEAU && ARCH_TEGRA
+ default y
+ help
+	  Support for the Nouveau platform driver, used for SoC GPUs such as
+	  those found on NVIDIA Tegra K1.
config NOUVEAU_DEBUG
int "Maximum debug level"
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 8b307e143632..f5d7f7ce4bc6 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -14,8 +14,10 @@ nouveau-y += core/core/enum.o
nouveau-y += core/core/event.o
nouveau-y += core/core/gpuobj.o
nouveau-y += core/core/handle.o
+nouveau-y += core/core/ioctl.o
nouveau-y += core/core/mm.o
nouveau-y += core/core/namedb.o
+nouveau-y += core/core/notify.o
nouveau-y += core/core/object.o
nouveau-y += core/core/option.o
nouveau-y += core/core/parent.o
@@ -26,6 +28,7 @@ nouveau-y += core/core/subdev.o
nouveau-y += core/subdev/bar/base.o
nouveau-y += core/subdev/bar/nv50.o
nouveau-y += core/subdev/bar/nvc0.o
+nouveau-y += core/subdev/bar/gk20a.o
nouveau-y += core/subdev/bios/base.o
nouveau-y += core/subdev/bios/bit.o
nouveau-y += core/subdev/bios/boost.o
@@ -64,6 +67,7 @@ nouveau-y += core/subdev/clock/nva3.o
nouveau-y += core/subdev/clock/nvaa.o
nouveau-y += core/subdev/clock/nvc0.o
nouveau-y += core/subdev/clock/nve0.o
+nouveau-y += core/subdev/clock/gk20a.o
nouveau-y += core/subdev/clock/pllnv04.o
nouveau-y += core/subdev/clock/pllnva3.o
nouveau-y += core/subdev/devinit/base.o
@@ -149,8 +153,10 @@ nouveau-y += core/subdev/instmem/base.o
nouveau-y += core/subdev/instmem/nv04.o
nouveau-y += core/subdev/instmem/nv40.o
nouveau-y += core/subdev/instmem/nv50.o
-nouveau-y += core/subdev/ltcg/gf100.o
-nouveau-y += core/subdev/ltcg/gm107.o
+nouveau-y += core/subdev/ltc/base.o
+nouveau-y += core/subdev/ltc/gf100.o
+nouveau-y += core/subdev/ltc/gk104.o
+nouveau-y += core/subdev/ltc/gm107.o
nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
nouveau-y += core/subdev/mc/nv40.o
@@ -161,6 +167,7 @@ nouveau-y += core/subdev/mc/nv94.o
nouveau-y += core/subdev/mc/nv98.o
nouveau-y += core/subdev/mc/nvc0.o
nouveau-y += core/subdev/mc/nvc3.o
+nouveau-y += core/subdev/mc/gk20a.o
nouveau-y += core/subdev/mxm/base.o
nouveau-y += core/subdev/mxm/mxms.o
nouveau-y += core/subdev/mxm/nv50.o
@@ -169,6 +176,7 @@ nouveau-y += core/subdev/pwr/memx.o
nouveau-y += core/subdev/pwr/nva3.o
nouveau-y += core/subdev/pwr/nvc0.o
nouveau-y += core/subdev/pwr/nvd0.o
+nouveau-y += core/subdev/pwr/gk104.o
nouveau-y += core/subdev/pwr/nv108.o
nouveau-y += core/subdev/therm/base.o
nouveau-y += core/subdev/therm/fan.o
@@ -211,6 +219,7 @@ nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/copy/nve0.o
nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/device/acpi.o
nouveau-y += core/engine/device/base.o
nouveau-y += core/engine/device/ctrl.o
nouveau-y += core/engine/device/nv04.o
@@ -270,6 +279,7 @@ nouveau-y += core/engine/graph/ctxnvd9.o
nouveau-y += core/engine/graph/ctxnve4.o
nouveau-y += core/engine/graph/ctxgk20a.o
nouveau-y += core/engine/graph/ctxnvf0.o
+nouveau-y += core/engine/graph/ctxgk110b.o
nouveau-y += core/engine/graph/ctxnv108.o
nouveau-y += core/engine/graph/ctxgm107.o
nouveau-y += core/engine/graph/nv04.o
@@ -291,6 +301,7 @@ nouveau-y += core/engine/graph/nvd9.o
nouveau-y += core/engine/graph/nve4.o
nouveau-y += core/engine/graph/gk20a.o
nouveau-y += core/engine/graph/nvf0.o
+nouveau-y += core/engine/graph/gk110b.o
nouveau-y += core/engine/graph/nv108.o
nouveau-y += core/engine/graph/gm107.o
nouveau-y += core/engine/mpeg/nv31.o
@@ -318,11 +329,18 @@ nouveau-y += core/engine/vp/nv98.o
nouveau-y += core/engine/vp/nvc0.o
nouveau-y += core/engine/vp/nve0.o
+# nvif
+nouveau-y += nvif/object.o
+nouveau-y += nvif/client.o
+nouveau-y += nvif/device.o
+nouveau-y += nvif/notify.o
+
# drm/core
nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
nouveau-y += nouveau_vga.o nouveau_agp.o
nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
nouveau-y += nouveau_prime.o nouveau_abi16.o
+nouveau-y += nouveau_nvif.o nouveau_usif.o
nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
@@ -349,3 +367,6 @@ nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
+
+# platform driver
+obj-$(CONFIG_NOUVEAU_PLATFORM_DRIVER) += nouveau_platform.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 9079c0ac58e6..10598dede9e9 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -26,13 +26,167 @@
#include <core/client.h>
#include <core/handle.h>
#include <core/option.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/event.h>
#include <engine/device.h>
+struct nvkm_client_notify {
+ struct nouveau_client *client;
+ struct nvkm_notify n;
+ u8 version;
+ u8 size;
+ union {
+ struct nvif_notify_rep_v0 v0;
+ } rep;
+};
+
+static int
+nvkm_client_notify(struct nvkm_notify *n)
+{
+ struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n);
+ struct nouveau_client *client = notify->client;
+ return client->ntfy(&notify->rep, notify->size, n->data, n->size);
+}
+
+int
+nvkm_client_notify_put(struct nouveau_client *client, int index)
+{
+ if (index < ARRAY_SIZE(client->notify)) {
+ if (client->notify[index]) {
+ nvkm_notify_put(&client->notify[index]->n);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int
+nvkm_client_notify_get(struct nouveau_client *client, int index)
+{
+ if (index < ARRAY_SIZE(client->notify)) {
+ if (client->notify[index]) {
+ nvkm_notify_get(&client->notify[index]->n);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int
+nvkm_client_notify_del(struct nouveau_client *client, int index)
+{
+ if (index < ARRAY_SIZE(client->notify)) {
+ if (client->notify[index]) {
+ nvkm_notify_fini(&client->notify[index]->n);
+ kfree(client->notify[index]);
+ client->notify[index] = NULL;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int
+nvkm_client_notify_new(struct nouveau_client *client,
+ struct nvkm_event *event, void *data, u32 size)
+{
+ struct nvkm_client_notify *notify;
+ union {
+ struct nvif_notify_req_v0 v0;
+ } *req = data;
+ u8 index, reply;
+ int ret;
+
+ for (index = 0; index < ARRAY_SIZE(client->notify); index++) {
+ if (!client->notify[index])
+ break;
+ }
+
+ if (index == ARRAY_SIZE(client->notify))
+ return -ENOSPC;
+
+ notify = kzalloc(sizeof(*notify), GFP_KERNEL);
+ if (!notify)
+ return -ENOMEM;
+
+ nv_ioctl(client, "notify new size %d\n", size);
+ if (nvif_unpack(req->v0, 0, 0, true)) {
+ nv_ioctl(client, "notify new vers %d reply %d route %02x "
+ "token %llx\n", req->v0.version,
+ req->v0.reply, req->v0.route, req->v0.token);
+ notify->version = req->v0.version;
+ notify->size = sizeof(notify->rep.v0);
+ notify->rep.v0.version = req->v0.version;
+ notify->rep.v0.route = req->v0.route;
+ notify->rep.v0.token = req->v0.token;
+ reply = req->v0.reply;
+ }
+
+ if (ret == 0) {
+ ret = nvkm_notify_init(event, nvkm_client_notify, false,
+ data, size, reply, &notify->n);
+ if (ret == 0) {
+ client->notify[index] = notify;
+ notify->client = client;
+			return index;
+ }
+ }
+
+ kfree(notify);
+	return ret;
+}
+
+static int
+nouveau_client_devlist(struct nouveau_object *object, void *data, u32 size)
+{
+ union {
+ struct nv_client_devlist_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "client devlist size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "client devlist vers %d count %d\n",
+ args->v0.version, args->v0.count);
+ if (size == sizeof(args->v0.device[0]) * args->v0.count) {
+ ret = nouveau_device_list(args->v0.device,
+ args->v0.count);
+ if (ret >= 0) {
+ args->v0.count = ret;
+ ret = 0;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nouveau_client_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ switch (mthd) {
+ case NV_CLIENT_DEVLIST:
+ return nouveau_client_devlist(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
static void
nouveau_client_dtor(struct nouveau_object *object)
{
struct nouveau_client *client = (void *)object;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+ nvkm_client_notify_del(client, i);
nouveau_object_ref(NULL, &client->device);
nouveau_handle_destroy(client->root);
nouveau_namedb_destroy(&client->base);
@@ -42,6 +196,7 @@ static struct nouveau_oclass
nouveau_client_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.dtor = nouveau_client_dtor,
+ .mthd = nouveau_client_mthd,
},
};
@@ -93,9 +248,12 @@ int
nouveau_client_fini(struct nouveau_client *client, bool suspend)
{
const char *name[2] = { "fini", "suspend" };
- int ret;
-
+ int ret, i;
nv_debug(client, "%s running\n", name[suspend]);
+ nv_debug(client, "%s notify\n", name[suspend]);
+ for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+ nvkm_client_notify_put(client, i);
+ nv_debug(client, "%s object\n", name[suspend]);
ret = nouveau_handle_fini(client->root, suspend);
nv_debug(client, "%s completed with %d\n", name[suspend], ret);
return ret;
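
nvkm_client_notify_new() above hands out notifier slots from a small fixed-size array on the client: scan for the first free entry, install the new notifier there, and report the slot index back so the later ntfy get/put/del ioctls can name it. A stripped-down userspace sketch of that slot-table idea follows; it is standalone, uses invented names, and stores dummies where the real code stores nvkm_client_notify pointers:

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

#define NSLOTS 8

struct slot { int used; };

static struct slot *table[NSLOTS];

static int slot_new(void)
{
	unsigned int index;
	struct slot *s;

	for (index = 0; index < NSLOTS; index++) {
		if (!table[index])
			break;
	}
	if (index == NSLOTS)
		return -ENOSPC;          /* table full, as in the kernel code */

	s = calloc(1, sizeof(*s));
	if (!s)
		return -ENOMEM;

	s->used = 1;
	table[index] = s;
	return (int)index;               /* the index is the caller-visible handle */
}

static int slot_del(int index)
{
	if (index < 0 || index >= NSLOTS || !table[index])
		return -ENOENT;
	free(table[index]);
	table[index] = NULL;
	return 0;
}

int main(void)
{
	int a = slot_new();
	int b = slot_new();

	printf("slots %d %d\n", a, b);
	slot_del(a);
	slot_del(b);
	return 0;
}
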
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index ae81d3b5d8b7..0540a48c5678 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2013 Red Hat Inc.
+ * Copyright 2013-2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,173 +24,77 @@
#include <core/event.h>
void
-nouveau_event_put(struct nouveau_eventh *handler)
+nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
- struct nouveau_event *event = handler->event;
- unsigned long flags;
- u32 m, t;
-
- if (!__test_and_clear_bit(NVKM_EVENT_ENABLE, &handler->flags))
- return;
-
- spin_lock_irqsave(&event->refs_lock, flags);
- for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
- if (!--event->refs[handler->index * event->types_nr + t]) {
- if (event->disable)
- event->disable(event, 1 << t, handler->index);
+ BUG_ON(!spin_is_locked(&event->refs_lock));
+ while (types) {
+ int type = __ffs(types); types &= ~(1 << type);
+ if (--event->refs[index * event->types_nr + type] == 0) {
+ if (event->func->fini)
+ event->func->fini(event, 1 << type, index);
}
-
}
- spin_unlock_irqrestore(&event->refs_lock, flags);
}
void
-nouveau_event_get(struct nouveau_eventh *handler)
+nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
- struct nouveau_event *event = handler->event;
- unsigned long flags;
- u32 m, t;
-
- if (__test_and_set_bit(NVKM_EVENT_ENABLE, &handler->flags))
- return;
-
- spin_lock_irqsave(&event->refs_lock, flags);
- for (m = handler->types; t = __ffs(m), m; m &= ~(1 << t)) {
- if (!event->refs[handler->index * event->types_nr + t]++) {
- if (event->enable)
- event->enable(event, 1 << t, handler->index);
+ BUG_ON(!spin_is_locked(&event->refs_lock));
+ while (types) {
+ int type = __ffs(types); types &= ~(1 << type);
+ if (++event->refs[index * event->types_nr + type] == 1) {
+ if (event->func->init)
+ event->func->init(event, 1 << type, index);
}
-
}
- spin_unlock_irqrestore(&event->refs_lock, flags);
-}
-
-static void
-nouveau_event_fini(struct nouveau_eventh *handler)
-{
- struct nouveau_event *event = handler->event;
- unsigned long flags;
- nouveau_event_put(handler);
- spin_lock_irqsave(&event->list_lock, flags);
- list_del(&handler->head);
- spin_unlock_irqrestore(&event->list_lock, flags);
-}
-
-static int
-nouveau_event_init(struct nouveau_event *event, u32 types, int index,
- int (*func)(void *, u32, int), void *priv,
- struct nouveau_eventh *handler)
-{
- unsigned long flags;
-
- if (types & ~((1 << event->types_nr) - 1))
- return -EINVAL;
- if (index >= event->index_nr)
- return -EINVAL;
-
- handler->event = event;
- handler->flags = 0;
- handler->types = types;
- handler->index = index;
- handler->func = func;
- handler->priv = priv;
-
- spin_lock_irqsave(&event->list_lock, flags);
- list_add_tail(&handler->head, &event->list[index]);
- spin_unlock_irqrestore(&event->list_lock, flags);
- return 0;
-}
-
-int
-nouveau_event_new(struct nouveau_event *event, u32 types, int index,
- int (*func)(void *, u32, int), void *priv,
- struct nouveau_eventh **phandler)
-{
- struct nouveau_eventh *handler;
- int ret = -ENOMEM;
-
- if (event->check) {
- ret = event->check(event, types, index);
- if (ret)
- return ret;
- }
-
- handler = *phandler = kmalloc(sizeof(*handler), GFP_KERNEL);
- if (handler) {
- ret = nouveau_event_init(event, types, index, func, priv, handler);
- if (ret)
- kfree(handler);
- }
-
- return ret;
-}
-
-void
-nouveau_event_ref(struct nouveau_eventh *handler, struct nouveau_eventh **ref)
-{
- BUG_ON(handler != NULL);
- if (*ref) {
- nouveau_event_fini(*ref);
- kfree(*ref);
- }
- *ref = handler;
}
void
-nouveau_event_trigger(struct nouveau_event *event, u32 types, int index)
+nvkm_event_send(struct nvkm_event *event, u32 types, int index,
+ void *data, u32 size)
{
- struct nouveau_eventh *handler;
+ struct nvkm_notify *notify;
unsigned long flags;
- if (WARN_ON(index >= event->index_nr))
+ if (!event->refs || WARN_ON(index >= event->index_nr))
return;
spin_lock_irqsave(&event->list_lock, flags);
- list_for_each_entry(handler, &event->list[index], head) {
- if (!test_bit(NVKM_EVENT_ENABLE, &handler->flags))
- continue;
- if (!(handler->types & types))
- continue;
- if (handler->func(handler->priv, handler->types & types, index)
- != NVKM_EVENT_DROP)
- continue;
- nouveau_event_put(handler);
+ list_for_each_entry(notify, &event->list, head) {
+ if (notify->index == index && (notify->types & types)) {
+ if (event->func->send) {
+ event->func->send(data, size, notify);
+ continue;
+ }
+ nvkm_notify_send(notify, data, size);
+ }
}
spin_unlock_irqrestore(&event->list_lock, flags);
}
void
-nouveau_event_destroy(struct nouveau_event **pevent)
+nvkm_event_fini(struct nvkm_event *event)
{
- struct nouveau_event *event = *pevent;
- if (event) {
- kfree(event);
- *pevent = NULL;
+ if (event->refs) {
+ kfree(event->refs);
+ event->refs = NULL;
}
}
int
-nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **pevent)
+nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr,
+ struct nvkm_event *event)
{
- struct nouveau_event *event;
- int i;
-
- event = *pevent = kzalloc(sizeof(*event) + (index_nr * types_nr) *
- sizeof(event->refs[0]), GFP_KERNEL);
- if (!event)
- return -ENOMEM;
-
- event->list = kmalloc(sizeof(*event->list) * index_nr, GFP_KERNEL);
- if (!event->list) {
- kfree(event);
+ event->refs = kzalloc(sizeof(*event->refs) * index_nr * types_nr,
+ GFP_KERNEL);
+ if (!event->refs)
return -ENOMEM;
- }
- spin_lock_init(&event->list_lock);
- spin_lock_init(&event->refs_lock);
- for (i = 0; i < index_nr; i++)
- INIT_LIST_HEAD(&event->list[i]);
+ event->func = func;
event->types_nr = types_nr;
event->index_nr = index_nr;
+ spin_lock_init(&event->refs_lock);
+ spin_lock_init(&event->list_lock);
+ INIT_LIST_HEAD(&event->list);
return 0;
}
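
The reworked nvkm_event above keeps one reference counter per (index, type) pair and only calls the backend's init/fini hooks on the 0->1 and 1->0 transitions, so several notifiers can share one hardware interrupt source. Below is a minimal userspace model of just that counting scheme, with locking omitted, printf standing in for the hooks, and __builtin_ctz (a GCC/Clang builtin) playing the role of the kernel's __ffs:

#include <stdio.h>
#include <stdlib.h>

struct ev {
	int types_nr;
	int index_nr;
	int *refs;                      /* index_nr * types_nr counters */
};

static void ev_get(struct ev *ev, unsigned int types, int index)
{
	while (types) {
		int type = __builtin_ctz(types);
		types &= ~(1u << type);
		if (++ev->refs[index * ev->types_nr + type] == 1)
			printf("init type %d index %d\n", type, index);
	}
}

static void ev_put(struct ev *ev, unsigned int types, int index)
{
	while (types) {
		int type = __builtin_ctz(types);
		types &= ~(1u << type);
		if (--ev->refs[index * ev->types_nr + type] == 0)
			printf("fini type %d index %d\n", type, index);
	}
}

int main(void)
{
	struct ev ev = { .types_nr = 4, .index_nr = 2 };
	ev.refs = calloc(ev.index_nr * ev.types_nr, sizeof(*ev.refs));
	if (!ev.refs)
		return 1;

	ev_get(&ev, 0x5, 1);            /* enables types 0 and 2 on index 1 */
	ev_get(&ev, 0x1, 1);            /* no init: type 0 already enabled */
	ev_put(&ev, 0x5, 1);            /* type 2 drops to zero -> fini */
	ev_put(&ev, 0x1, 1);            /* last ref on type 0 -> fini */
	free(ev.refs);
	return 0;
}
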
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index 264c2b338ac3..a490b805d7e3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -146,9 +146,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
}
hprintk(handle, TRACE, "created\n");
-
*phandle = handle;
-
return 0;
}
@@ -224,3 +222,116 @@ nouveau_handle_put(struct nouveau_handle *handle)
if (handle)
nouveau_namedb_put(handle);
}
+
+int
+nouveau_handle_new(struct nouveau_object *client, u32 _parent, u32 _handle,
+ u16 _oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_object *parent = NULL;
+ struct nouveau_object *engctx = NULL;
+ struct nouveau_object *object = NULL;
+ struct nouveau_object *engine;
+ struct nouveau_oclass *oclass;
+ struct nouveau_handle *handle;
+ int ret;
+
+ /* lookup parent object and ensure it *is* a parent */
+ parent = nouveau_handle_ref(client, _parent);
+ if (!parent) {
+ nv_error(client, "parent 0x%08x not found\n", _parent);
+ return -ENOENT;
+ }
+
+ if (!nv_iclass(parent, NV_PARENT_CLASS)) {
+ nv_error(parent, "cannot have children\n");
+ ret = -EINVAL;
+ goto fail_class;
+ }
+
+ /* check that parent supports the requested subclass */
+ ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
+ if (ret) {
+ nv_debug(parent, "illegal class 0x%04x\n", _oclass);
+ goto fail_class;
+ }
+
+ /* make sure engine init has been completed *before* any objects
+ * it controls are created - the constructors may depend on
+ * state calculated at init (ie. default context construction)
+ */
+ if (engine) {
+ ret = nouveau_object_inc(engine);
+ if (ret)
+ goto fail_class;
+ }
+
+ /* if engine requires it, create a context object to insert
+ * between the parent and its children (eg. PGRAPH context)
+ */
+ if (engine && nv_engine(engine)->cclass) {
+ ret = nouveau_object_ctor(parent, engine,
+ nv_engine(engine)->cclass,
+ data, size, &engctx);
+ if (ret)
+ goto fail_engctx;
+ } else {
+ nouveau_object_ref(parent, &engctx);
+ }
+
+ /* finally, create new object and bind it to its handle */
+ ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
+ *pobject = object;
+ if (ret)
+ goto fail_ctor;
+
+ ret = nouveau_object_inc(object);
+ if (ret)
+ goto fail_init;
+
+ ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
+ if (ret)
+ goto fail_handle;
+
+ ret = nouveau_handle_init(handle);
+ if (ret)
+ nouveau_handle_destroy(handle);
+
+fail_handle:
+ nouveau_object_dec(object, false);
+fail_init:
+ nouveau_object_ref(NULL, &object);
+fail_ctor:
+ nouveau_object_ref(NULL, &engctx);
+fail_engctx:
+ if (engine)
+ nouveau_object_dec(engine, false);
+fail_class:
+ nouveau_object_ref(NULL, &parent);
+ return ret;
+}
+
+int
+nouveau_handle_del(struct nouveau_object *client, u32 _parent, u32 _handle)
+{
+ struct nouveau_object *parent = NULL;
+ struct nouveau_object *namedb = NULL;
+ struct nouveau_handle *handle = NULL;
+
+ parent = nouveau_handle_ref(client, _parent);
+ if (!parent)
+ return -ENOENT;
+
+ namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
+ if (namedb) {
+ handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
+ if (handle) {
+ nouveau_namedb_put(handle);
+ nouveau_handle_fini(handle, false);
+ nouveau_handle_destroy(handle);
+ }
+ }
+
+ nouveau_object_ref(NULL, &parent);
+ return handle ? 0 : -EINVAL;
+}
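
nouveau_handle_new() above (moved here from object.c) uses the usual kernel goto-unwind ladder: each fail_* label releases exactly the resources acquired before the failing step. A compact userspace sketch of that error-handling shape is shown below; the resources are plain mallocs, the success path cleans up its intermediates explicitly rather than falling through the labels, and all names are invented:

#include <stdlib.h>
#include <errno.h>

struct res { char buf[16]; };

static int build_chain(struct res **out)
{
	struct res *parent = NULL, *engctx = NULL, *object = NULL;
	int ret;

	parent = malloc(sizeof(*parent));
	if (!parent)
		return -ENOMEM;

	engctx = malloc(sizeof(*engctx));
	if (!engctx) {
		ret = -ENOMEM;
		goto fail_engctx;
	}

	object = malloc(sizeof(*object));
	if (!object) {
		ret = -ENOMEM;
		goto fail_object;
	}

	*out = object;                  /* success: drop only the intermediates */
	free(engctx);
	free(parent);
	return 0;

fail_object:
	free(engctx);
fail_engctx:
	free(parent);
	return ret;
}

int main(void)
{
	struct res *r = NULL;
	return build_chain(&r) ? 1 : (free(r), 0);
}
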
diff --git a/drivers/gpu/drm/nouveau/core/core/ioctl.c b/drivers/gpu/drm/nouveau/core/core/ioctl.c
new file mode 100644
index 000000000000..f7e19bfb489c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/ioctl.c
@@ -0,0 +1,531 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/object.h>
+#include <core/parent.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/client.h>
+#include <core/device.h>
+#include <core/ioctl.h>
+#include <core/event.h>
+
+#include <nvif/unpack.h>
+#include <nvif/ioctl.h>
+
+static int
+nvkm_ioctl_nop(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_nop none;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "nop size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nv_ioctl(object, "nop\n");
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_sclass(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_sclass_v0 v0;
+ } *args = data;
+ int ret;
+
+ if (!nv_iclass(object, NV_PARENT_CLASS)) {
+ nv_debug(object, "cannot have children (sclass)\n");
+ return -ENODEV;
+ }
+
+ nv_ioctl(object, "sclass size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "sclass vers %d count %d\n",
+ args->v0.version, args->v0.count);
+ if (size == args->v0.count * sizeof(args->v0.oclass[0])) {
+ ret = nouveau_parent_lclass(object, args->v0.oclass,
+ args->v0.count);
+ if (ret >= 0) {
+ args->v0.count = ret;
+ ret = 0;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_new(struct nouveau_handle *parent, void *data, u32 size)
+{
+ union {
+ struct nvif_ioctl_new_v0 v0;
+ } *args = data;
+ struct nouveau_client *client = nouveau_client(parent->object);
+ struct nouveau_object *engctx = NULL;
+ struct nouveau_object *object = NULL;
+ struct nouveau_object *engine;
+ struct nouveau_oclass *oclass;
+ struct nouveau_handle *handle;
+ u32 _handle, _oclass;
+ int ret;
+
+ nv_ioctl(client, "new size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ _handle = args->v0.handle;
+ _oclass = args->v0.oclass;
+ } else
+ return ret;
+
+ nv_ioctl(client, "new vers %d handle %08x class %08x "
+ "route %02x token %llx\n",
+ args->v0.version, _handle, _oclass,
+ args->v0.route, args->v0.token);
+
+ if (!nv_iclass(parent->object, NV_PARENT_CLASS)) {
+ nv_debug(parent->object, "cannot have children (ctor)\n");
+ ret = -ENODEV;
+ goto fail_class;
+ }
+
+ /* check that parent supports the requested subclass */
+ ret = nouveau_parent_sclass(parent->object, _oclass, &engine, &oclass);
+ if (ret) {
+ nv_debug(parent->object, "illegal class 0x%04x\n", _oclass);
+ goto fail_class;
+ }
+
+ /* make sure engine init has been completed *before* any objects
+ * it controls are created - the constructors may depend on
+ * state calculated at init (ie. default context construction)
+ */
+ if (engine) {
+ ret = nouveau_object_inc(engine);
+ if (ret)
+ goto fail_class;
+ }
+
+ /* if engine requires it, create a context object to insert
+ * between the parent and its children (eg. PGRAPH context)
+ */
+ if (engine && nv_engine(engine)->cclass) {
+ ret = nouveau_object_ctor(parent->object, engine,
+ nv_engine(engine)->cclass,
+ data, size, &engctx);
+ if (ret)
+ goto fail_engctx;
+ } else {
+ nouveau_object_ref(parent->object, &engctx);
+ }
+
+ /* finally, create new object and bind it to its handle */
+ ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
+ client->data = object;
+ if (ret)
+ goto fail_ctor;
+
+ ret = nouveau_object_inc(object);
+ if (ret)
+ goto fail_init;
+
+ ret = nouveau_handle_create(parent->object, parent->name,
+ _handle, object, &handle);
+ if (ret)
+ goto fail_handle;
+
+ ret = nouveau_handle_init(handle);
+ handle->route = args->v0.route;
+ handle->token = args->v0.token;
+ if (ret)
+ nouveau_handle_destroy(handle);
+
+fail_handle:
+ nouveau_object_dec(object, false);
+fail_init:
+ nouveau_object_ref(NULL, &object);
+fail_ctor:
+ nouveau_object_ref(NULL, &engctx);
+fail_engctx:
+ if (engine)
+ nouveau_object_dec(engine, false);
+fail_class:
+ return ret;
+}
+
+static int
+nvkm_ioctl_del(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_del none;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "delete size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nv_ioctl(object, "delete\n");
+ nouveau_handle_fini(handle, false);
+ nouveau_handle_destroy(handle);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_mthd(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_mthd_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "mthd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "mthd vers %d mthd %02x\n",
+ args->v0.version, args->v0.method);
+ if (ret = -ENODEV, ofuncs->mthd)
+ ret = ofuncs->mthd(object, args->v0.method, data, size);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_rd(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_rd_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "rd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "rd vers %d size %d addr %016llx\n",
+ args->v0.version, args->v0.size, args->v0.addr);
+ switch (args->v0.size) {
+ case 1:
+ if (ret = -ENODEV, ofuncs->rd08) {
+ args->v0.data = nv_ro08(object, args->v0.addr);
+ ret = 0;
+ }
+ break;
+ case 2:
+ if (ret = -ENODEV, ofuncs->rd16) {
+ args->v0.data = nv_ro16(object, args->v0.addr);
+ ret = 0;
+ }
+ break;
+ case 4:
+ if (ret = -ENODEV, ofuncs->rd32) {
+ args->v0.data = nv_ro32(object, args->v0.addr);
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_wr(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_wr_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "wr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "wr vers %d size %d addr %016llx data %08x\n",
+ args->v0.version, args->v0.size, args->v0.addr,
+ args->v0.data);
+ switch (args->v0.size) {
+ case 1:
+ if (ret = -ENODEV, ofuncs->wr08) {
+ nv_wo08(object, args->v0.addr, args->v0.data);
+ ret = 0;
+ }
+ break;
+ case 2:
+ if (ret = -ENODEV, ofuncs->wr16) {
+ nv_wo16(object, args->v0.addr, args->v0.data);
+ ret = 0;
+ }
+ break;
+ case 4:
+ if (ret = -ENODEV, ofuncs->wr32) {
+ nv_wo32(object, args->v0.addr, args->v0.data);
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_map(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_map_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "map size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "map vers %d\n", args->v0.version);
+ if (ret = -ENODEV, ofuncs->map) {
+ ret = ofuncs->map(object, &args->v0.handle,
+ &args->v0.length);
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_unmap(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_unmap none;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "unmap size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nv_ioctl(object, "unmap\n");
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_ntfy_new(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_client *client = nouveau_client(handle->object);
+ struct nouveau_object *object = handle->object;
+ struct nouveau_ofuncs *ofuncs = object->oclass->ofuncs;
+ union {
+ struct nvif_ioctl_ntfy_new_v0 v0;
+ } *args = data;
+ struct nvkm_event *event;
+ int ret;
+
+ nv_ioctl(object, "ntfy new size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "ntfy new vers %d event %02x\n",
+ args->v0.version, args->v0.event);
+ if (ret = -ENODEV, ofuncs->ntfy)
+ ret = ofuncs->ntfy(object, args->v0.event, &event);
+ if (ret == 0) {
+ ret = nvkm_client_notify_new(client, event, data, size);
+ if (ret >= 0) {
+ args->v0.index = ret;
+ ret = 0;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_ntfy_del(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_client *client = nouveau_client(handle->object);
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_ntfy_del_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "ntfy del size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "ntfy del vers %d index %d\n",
+ args->v0.version, args->v0.index);
+ ret = nvkm_client_notify_del(client, args->v0.index);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_ntfy_get(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_client *client = nouveau_client(handle->object);
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_ntfy_get_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "ntfy get size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "ntfy get vers %d index %d\n",
+ args->v0.version, args->v0.index);
+ ret = nvkm_client_notify_get(client, args->v0.index);
+ }
+
+ return ret;
+}
+
+static int
+nvkm_ioctl_ntfy_put(struct nouveau_handle *handle, void *data, u32 size)
+{
+ struct nouveau_client *client = nouveau_client(handle->object);
+ struct nouveau_object *object = handle->object;
+ union {
+ struct nvif_ioctl_ntfy_put_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "ntfy put size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "ntfy put vers %d index %d\n",
+ args->v0.version, args->v0.index);
+ ret = nvkm_client_notify_put(client, args->v0.index);
+ }
+
+ return ret;
+}
+
+static struct {
+ int version;
+ int (*func)(struct nouveau_handle *, void *, u32);
+}
+nvkm_ioctl_v0[] = {
+ { 0x00, nvkm_ioctl_nop },
+ { 0x00, nvkm_ioctl_sclass },
+ { 0x00, nvkm_ioctl_new },
+ { 0x00, nvkm_ioctl_del },
+ { 0x00, nvkm_ioctl_mthd },
+ { 0x00, nvkm_ioctl_rd },
+ { 0x00, nvkm_ioctl_wr },
+ { 0x00, nvkm_ioctl_map },
+ { 0x00, nvkm_ioctl_unmap },
+ { 0x00, nvkm_ioctl_ntfy_new },
+ { 0x00, nvkm_ioctl_ntfy_del },
+ { 0x00, nvkm_ioctl_ntfy_get },
+ { 0x00, nvkm_ioctl_ntfy_put },
+};
+
+static int
+nvkm_ioctl_path(struct nouveau_handle *parent, u32 type, u32 nr,
+ u32 *path, void *data, u32 size,
+ u8 owner, u8 *route, u64 *token)
+{
+ struct nouveau_handle *handle = parent;
+ struct nouveau_namedb *namedb;
+ struct nouveau_object *object;
+ int ret;
+
+ while ((object = parent->object), nr--) {
+ nv_ioctl(object, "path 0x%08x\n", path[nr]);
+ if (!nv_iclass(object, NV_PARENT_CLASS)) {
+ nv_debug(object, "cannot have children (path)\n");
+ return -EINVAL;
+ }
+
+ if (!(namedb = (void *)nv_pclass(object, NV_NAMEDB_CLASS)) ||
+ !(handle = nouveau_namedb_get(namedb, path[nr]))) {
+ nv_debug(object, "handle 0x%08x not found\n", path[nr]);
+ return -ENOENT;
+ }
+ nouveau_namedb_put(handle);
+ parent = handle;
+ }
+
+ if (owner != NVIF_IOCTL_V0_OWNER_ANY &&
+ owner != handle->route) {
+ nv_ioctl(object, "object route != owner\n");
+ return -EACCES;
+ }
+ *route = handle->route;
+ *token = handle->token;
+
+ if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
+ if (nvkm_ioctl_v0[type].version == 0) {
+ ret = nvkm_ioctl_v0[type].func(handle, data, size);
+ }
+ }
+
+ return ret;
+}
+
+int
+nvkm_ioctl(struct nouveau_client *client, bool supervisor,
+ void *data, u32 size, void **hack)
+{
+ union {
+ struct nvif_ioctl_v0 v0;
+ } *args = data;
+ int ret;
+
+ client->super = supervisor;
+ nv_ioctl(client, "size %d\n", size);
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(client, "vers %d type %02x path %d owner %02x\n",
+ args->v0.version, args->v0.type, args->v0.path_nr,
+ args->v0.owner);
+ ret = nvkm_ioctl_path(client->root, args->v0.type,
+ args->v0.path_nr, args->v0.path,
+ data, size, args->v0.owner,
+ &args->v0.route, &args->v0.token);
+ }
+
+ nv_ioctl(client, "return %d\n", ret);
+ if (hack) {
+ *hack = client->data;
+ client->data = NULL;
+ }
+ client->super = false;
+ return ret;
+}
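
Every handler in the new ioctl.c follows the same shape: the argument blob is overlaid with a union of versioned structs, nvif_unpack() validates the version field and assigns ret as a side effect (which is why ret is returned even when unpacking fails), and the handler then writes its reply back into the same buffer. The following is a self-contained userspace sketch of that versioned-unpack idea with a hand-rolled check in place of the real nvif_unpack() macro; struct and function names are illustrative only:

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

struct ioctl_nop_v0 {
	uint8_t version;                /* must be 0 for this handler */
	uint8_t pad[7];
};

static int unpack_v0(const void *data, uint32_t size, uint8_t want,
		     struct ioctl_nop_v0 *out)
{
	if (size < sizeof(*out))
		return -EINVAL;
	memcpy(out, data, sizeof(*out));
	if (out->version != want)
		return -ENOSYS;         /* unknown version: let the caller fall back */
	return 0;
}

static int ioctl_nop(const void *data, uint32_t size)
{
	struct ioctl_nop_v0 args;
	int ret = unpack_v0(data, size, 0, &args);

	if (ret == 0)
		printf("nop vers %d\n", args.version);
	return ret;
}

int main(void)
{
	struct ioctl_nop_v0 good = { .version = 0 };
	struct ioctl_nop_v0 bad  = { .version = 9 };

	printf("good: %d\n", ioctl_nop(&good, sizeof(good)));
	printf("bad:  %d\n", ioctl_nop(&bad, sizeof(bad)));
	return 0;
}
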
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c
new file mode 100644
index 000000000000..76adb81bdea2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/notify.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <core/client.h>
+#include <core/event.h>
+#include <core/notify.h>
+
+#include <nvif/unpack.h>
+#include <nvif/event.h>
+
+static inline void
+nvkm_notify_put_locked(struct nvkm_notify *notify)
+{
+ if (notify->block++ == 0)
+ nvkm_event_put(notify->event, notify->types, notify->index);
+}
+
+void
+nvkm_notify_put(struct nvkm_notify *notify)
+{
+ struct nvkm_event *event = notify->event;
+ unsigned long flags;
+ if (likely(event) &&
+ test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
+ spin_lock_irqsave(&event->refs_lock, flags);
+ nvkm_notify_put_locked(notify);
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+ if (test_bit(NVKM_NOTIFY_WORK, &notify->flags))
+ flush_work(&notify->work);
+ }
+}
+
+static inline void
+nvkm_notify_get_locked(struct nvkm_notify *notify)
+{
+ if (--notify->block == 0)
+ nvkm_event_get(notify->event, notify->types, notify->index);
+}
+
+void
+nvkm_notify_get(struct nvkm_notify *notify)
+{
+ struct nvkm_event *event = notify->event;
+ unsigned long flags;
+ if (likely(event) &&
+ !test_and_set_bit(NVKM_NOTIFY_USER, &notify->flags)) {
+ spin_lock_irqsave(&event->refs_lock, flags);
+ nvkm_notify_get_locked(notify);
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+ }
+}
+
+static inline void
+nvkm_notify_func(struct nvkm_notify *notify)
+{
+ struct nvkm_event *event = notify->event;
+ int ret = notify->func(notify);
+ unsigned long flags;
+ if ((ret == NVKM_NOTIFY_KEEP) ||
+ !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
+ spin_lock_irqsave(&event->refs_lock, flags);
+ nvkm_notify_get_locked(notify);
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+ }
+}
+
+static void
+nvkm_notify_work(struct work_struct *work)
+{
+ struct nvkm_notify *notify = container_of(work, typeof(*notify), work);
+ nvkm_notify_func(notify);
+}
+
+void
+nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
+{
+ struct nvkm_event *event = notify->event;
+ unsigned long flags;
+
+ BUG_ON(!spin_is_locked(&event->list_lock));
+ BUG_ON(size != notify->size);
+
+ spin_lock_irqsave(&event->refs_lock, flags);
+ if (notify->block) {
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+ return;
+ }
+ nvkm_notify_put_locked(notify);
+ spin_unlock_irqrestore(&event->refs_lock, flags);
+
+ if (test_bit(NVKM_NOTIFY_WORK, &notify->flags)) {
+ memcpy((void *)notify->data, data, size);
+ schedule_work(&notify->work);
+ } else {
+ notify->data = data;
+ nvkm_notify_func(notify);
+ notify->data = NULL;
+ }
+}
+
+void
+nvkm_notify_fini(struct nvkm_notify *notify)
+{
+ unsigned long flags;
+ if (notify->event) {
+ nvkm_notify_put(notify);
+ spin_lock_irqsave(&notify->event->list_lock, flags);
+ list_del(&notify->head);
+ spin_unlock_irqrestore(&notify->event->list_lock, flags);
+ kfree((void *)notify->data);
+ notify->event = NULL;
+ }
+}
+
+int
+nvkm_notify_init(struct nvkm_event *event, int (*func)(struct nvkm_notify *),
+ bool work, void *data, u32 size, u32 reply,
+ struct nvkm_notify *notify)
+{
+ unsigned long flags;
+ int ret = -ENODEV;
+ if ((notify->event = event), event->refs) {
+ ret = event->func->ctor(data, size, notify);
+ if (ret == 0 && (ret = -EINVAL, notify->size == reply)) {
+ notify->flags = 0;
+ notify->block = 1;
+ notify->func = func;
+ notify->data = NULL;
+ if (ret = 0, work) {
+ INIT_WORK(&notify->work, nvkm_notify_work);
+ set_bit(NVKM_NOTIFY_WORK, &notify->flags);
+ notify->data = kmalloc(reply, GFP_KERNEL);
+ if (!notify->data)
+ ret = -ENOMEM;
+ }
+ }
+ if (ret == 0) {
+ spin_lock_irqsave(&event->list_lock, flags);
+ list_add_tail(&notify->head, &event->list);
+ spin_unlock_irqrestore(&event->list_lock, flags);
+ }
+ }
+ if (ret)
+ notify->event = NULL;
+ return ret;
+}
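
nvkm_notify_get()/nvkm_notify_put() above gate the shared event refcount with a per-notifier "block" counter that starts at 1: the first get() drops it to zero and enables delivery, while put() (or a one-shot delivery via nvkm_notify_send()) re-blocks it. A tiny userspace model of just that counter follows, with the NVKM_NOTIFY_USER bit, the locking and the workqueue handling stubbed out and all names invented:

#include <stdio.h>

struct fake_notify {
	int block;                      /* starts at 1 == blocked */
};

static void fake_event_enable(void)  { printf("event enabled\n"); }
static void fake_event_disable(void) { printf("event disabled\n"); }

static void fake_notify_get(struct fake_notify *n)
{
	if (--n->block == 0)
		fake_event_enable();
}

static void fake_notify_put(struct fake_notify *n)
{
	if (n->block++ == 0)
		fake_event_disable();
}

int main(void)
{
	struct fake_notify n = { .block = 1 };

	fake_notify_get(&n);            /* 1 -> 0: enable delivery */
	fake_notify_put(&n);            /* 0 -> 1: disable delivery */
	return 0;
}
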
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c
index 124538555904..b08630577c82 100644
--- a/drivers/gpu/drm/nouveau/core/core/object.c
+++ b/drivers/gpu/drm/nouveau/core/core/object.c
@@ -23,9 +23,6 @@
*/
#include <core/object.h>
-#include <core/parent.h>
-#include <core/namedb.h>
-#include <core/handle.h>
#include <core/engine.h>
#ifdef NOUVEAU_OBJECT_MAGIC
@@ -61,21 +58,15 @@ nouveau_object_create_(struct nouveau_object *parent,
return 0;
}
-static int
+int
_nouveau_object_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nouveau_object *object;
- int ret;
-
- ret = nouveau_object_create(parent, engine, oclass, 0, &object);
- *pobject = nv_object(object);
- if (ret)
- return ret;
-
- return 0;
+ if (size != 0)
+ return -ENOSYS;
+ return nouveau_object_create(parent, engine, oclass, 0, pobject);
}
void
@@ -91,42 +82,24 @@ nouveau_object_destroy(struct nouveau_object *object)
kfree(object);
}
-static void
-_nouveau_object_dtor(struct nouveau_object *object)
-{
- nouveau_object_destroy(object);
-}
-
int
nouveau_object_init(struct nouveau_object *object)
{
return 0;
}
-static int
-_nouveau_object_init(struct nouveau_object *object)
-{
- return nouveau_object_init(object);
-}
-
int
nouveau_object_fini(struct nouveau_object *object, bool suspend)
{
return 0;
}
-static int
-_nouveau_object_fini(struct nouveau_object *object, bool suspend)
-{
- return nouveau_object_fini(object, suspend);
-}
-
struct nouveau_ofuncs
nouveau_object_ofuncs = {
.ctor = _nouveau_object_ctor,
- .dtor = _nouveau_object_dtor,
- .init = _nouveau_object_init,
- .fini = _nouveau_object_fini,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
};
int
@@ -189,119 +162,6 @@ nouveau_object_ref(struct nouveau_object *obj, struct nouveau_object **ref)
}
int
-nouveau_object_new(struct nouveau_object *client, u32 _parent, u32 _handle,
- u16 _oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nouveau_object *parent = NULL;
- struct nouveau_object *engctx = NULL;
- struct nouveau_object *object = NULL;
- struct nouveau_object *engine;
- struct nouveau_oclass *oclass;
- struct nouveau_handle *handle;
- int ret;
-
- /* lookup parent object and ensure it *is* a parent */
- parent = nouveau_handle_ref(client, _parent);
- if (!parent) {
- nv_error(client, "parent 0x%08x not found\n", _parent);
- return -ENOENT;
- }
-
- if (!nv_iclass(parent, NV_PARENT_CLASS)) {
- nv_error(parent, "cannot have children\n");
- ret = -EINVAL;
- goto fail_class;
- }
-
- /* check that parent supports the requested subclass */
- ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
- if (ret) {
- nv_debug(parent, "illegal class 0x%04x\n", _oclass);
- goto fail_class;
- }
-
- /* make sure engine init has been completed *before* any objects
- * it controls are created - the constructors may depend on
- * state calculated at init (ie. default context construction)
- */
- if (engine) {
- ret = nouveau_object_inc(engine);
- if (ret)
- goto fail_class;
- }
-
- /* if engine requires it, create a context object to insert
- * between the parent and its children (eg. PGRAPH context)
- */
- if (engine && nv_engine(engine)->cclass) {
- ret = nouveau_object_ctor(parent, engine,
- nv_engine(engine)->cclass,
- data, size, &engctx);
- if (ret)
- goto fail_engctx;
- } else {
- nouveau_object_ref(parent, &engctx);
- }
-
- /* finally, create new object and bind it to its handle */
- ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
- *pobject = object;
- if (ret)
- goto fail_ctor;
-
- ret = nouveau_object_inc(object);
- if (ret)
- goto fail_init;
-
- ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
- if (ret)
- goto fail_handle;
-
- ret = nouveau_handle_init(handle);
- if (ret)
- nouveau_handle_destroy(handle);
-
-fail_handle:
- nouveau_object_dec(object, false);
-fail_init:
- nouveau_object_ref(NULL, &object);
-fail_ctor:
- nouveau_object_ref(NULL, &engctx);
-fail_engctx:
- if (engine)
- nouveau_object_dec(engine, false);
-fail_class:
- nouveau_object_ref(NULL, &parent);
- return ret;
-}
-
-int
-nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
-{
- struct nouveau_object *parent = NULL;
- struct nouveau_object *namedb = NULL;
- struct nouveau_handle *handle = NULL;
-
- parent = nouveau_handle_ref(client, _parent);
- if (!parent)
- return -ENOENT;
-
- namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
- if (namedb) {
- handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
- if (handle) {
- nouveau_namedb_put(handle);
- nouveau_handle_fini(handle, false);
- nouveau_handle_destroy(handle);
- }
- }
-
- nouveau_object_ref(NULL, &parent);
- return handle ? 0 : -EINVAL;
-}
-
-int
nouveau_object_inc(struct nouveau_object *object)
{
int ref = atomic_add_return(1, &object->usecount);
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c
index dee5d1235e9b..8701968a9743 100644
--- a/drivers/gpu/drm/nouveau/core/core/parent.c
+++ b/drivers/gpu/drm/nouveau/core/core/parent.c
@@ -75,6 +75,39 @@ nouveau_parent_sclass(struct nouveau_object *parent, u16 handle,
}
int
+nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size)
+{
+ struct nouveau_sclass *sclass;
+ struct nouveau_engine *engine;
+ struct nouveau_oclass *oclass;
+ int nr = -1, i;
+ u64 mask;
+
+ sclass = nv_parent(parent)->sclass;
+ while (sclass) {
+ if (++nr < size)
+ lclass[nr] = sclass->oclass->handle;
+ sclass = sclass->sclass;
+ }
+
+ mask = nv_parent(parent)->engine;
+ while (i = __ffs64(mask), mask) {
+ engine = nouveau_engine(parent, i);
+ if (engine && (oclass = engine->sclass)) {
+ while (oclass->ofuncs) {
+ if (++nr < size)
+ lclass[nr] = oclass->handle;
+ oclass++;
+ }
+ }
+
+ mask &= ~(1ULL << i);
+ }
+
+ return nr + 1;
+}
+
+int
nouveau_parent_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, u32 pclass,
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index f31527733e00..abb410ef09ea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -30,7 +30,6 @@
#include <subdev/vm.h>
#include <core/client.h>
-#include <core/class.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index ac3291f781f6..9261694d0d35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -26,9 +26,7 @@
#include <engine/fifo.h>
#include <engine/copy.h>
-#include <core/class.h>
#include <core/enum.h>
-#include <core/class.h>
#include <core/enum.h>
#include "fuc/nvc0.fuc.h"
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 748a61eb3c6f..c7194b354605 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -24,7 +24,6 @@
#include <core/os.h>
#include <core/enum.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <engine/copy.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 2551dafbec73..ea5c42f31791 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -25,7 +25,6 @@
#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/gpuobj.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index c7082377ec76..5571c09534cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -25,7 +25,6 @@
#include <core/client.h>
#include <core/os.h>
#include <core/enum.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <subdev/timer.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.c b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c
new file mode 100644
index 000000000000..4dbf0ba89e5c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/acpi.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "acpi.h"
+
+#ifdef CONFIG_ACPI
+static int
+nvkm_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct nouveau_device *device =
+ container_of(nb, typeof(*device), acpi.nb);
+ struct acpi_bus_event *info = data;
+
+ if (!strcmp(info->device_class, "ac_adapter"))
+ nvkm_event_send(&device->event, 1, 0, NULL, 0);
+
+ return NOTIFY_DONE;
+}
+#endif
+
+int
+nvkm_acpi_fini(struct nouveau_device *device, bool suspend)
+{
+#ifdef CONFIG_ACPI
+ unregister_acpi_notifier(&device->acpi.nb);
+#endif
+ return 0;
+}
+
+int
+nvkm_acpi_init(struct nouveau_device *device)
+{
+#ifdef CONFIG_ACPI
+ device->acpi.nb.notifier_call = nvkm_acpi_ntfy;
+ register_acpi_notifier(&device->acpi.nb);
+#endif
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/acpi.h b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h
new file mode 100644
index 000000000000..cc49f4f568cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/device/acpi.h
@@ -0,0 +1,9 @@
+#ifndef __NVKM_DEVICE_ACPI_H__
+#define __NVKM_DEVICE_ACPI_H__
+
+#include <engine/device.h>
+
+int nvkm_acpi_init(struct nouveau_device *);
+int nvkm_acpi_fini(struct nouveau_device *, bool);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 18c8c7245b73..8928f7981d4a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -26,10 +26,14 @@
#include <core/device.h>
#include <core/client.h>
#include <core/option.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
-#include <core/class.h>
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
#include "priv.h"
+#include "acpi.h"
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
@@ -49,74 +53,258 @@ nouveau_device_find(u64 name)
return match;
}
+int
+nouveau_device_list(u64 *name, int size)
+{
+ struct nouveau_device *device;
+ int nr = 0;
+ mutex_lock(&nv_devices_mutex);
+ list_for_each_entry(device, &nv_devices, head) {
+ if (nr++ < size)
+ name[nr - 1] = device->handle;
+ }
+ mutex_unlock(&nv_devices_mutex);
+ return nr;
+}
+
/******************************************************************************
* nouveau_devobj (0x0080): class implementation
*****************************************************************************/
+
struct nouveau_devobj {
struct nouveau_parent base;
struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
};
+static int
+nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
+{
+ struct nouveau_device *device = nv_device(object);
+ struct nouveau_fb *pfb = nouveau_fb(device);
+ struct nouveau_instmem *imem = nouveau_instmem(device);
+ union {
+ struct nv_device_info_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "device info size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "device info vers %d\n", args->v0.version);
+ } else
+ return ret;
+
+ switch (device->chipset) {
+ case 0x01a:
+ case 0x01f:
+ case 0x04c:
+ case 0x04e:
+ case 0x063:
+ case 0x067:
+ case 0x068:
+ case 0x0aa:
+ case 0x0ac:
+ case 0x0af:
+ args->v0.platform = NV_DEVICE_INFO_V0_IGP;
+ break;
+ default:
+ if (device->pdev) {
+ if (pci_find_capability(device->pdev, PCI_CAP_ID_AGP))
+ args->v0.platform = NV_DEVICE_INFO_V0_AGP;
+ else
+ if (pci_is_pcie(device->pdev))
+ args->v0.platform = NV_DEVICE_INFO_V0_PCIE;
+ else
+ args->v0.platform = NV_DEVICE_INFO_V0_PCI;
+ } else {
+ args->v0.platform = NV_DEVICE_INFO_V0_SOC;
+ }
+ break;
+ }
+
+ switch (device->card_type) {
+ case NV_04: args->v0.family = NV_DEVICE_INFO_V0_TNT; break;
+ case NV_10:
+ case NV_11: args->v0.family = NV_DEVICE_INFO_V0_CELSIUS; break;
+ case NV_20: args->v0.family = NV_DEVICE_INFO_V0_KELVIN; break;
+ case NV_30: args->v0.family = NV_DEVICE_INFO_V0_RANKINE; break;
+ case NV_40: args->v0.family = NV_DEVICE_INFO_V0_CURIE; break;
+ case NV_50: args->v0.family = NV_DEVICE_INFO_V0_TESLA; break;
+ case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
+ case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
+ case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
+ default:
+ args->v0.family = 0;
+ break;
+ }
+
+ args->v0.chipset = device->chipset;
+ args->v0.revision = device->chipset >= 0x10 ? nv_rd32(device, 0) : 0x00;
+ if (pfb) args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
+ else args->v0.ram_size = args->v0.ram_user = 0;
+ if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
+ return 0;
+}
+
+static int
+nouveau_devobj_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ switch (mthd) {
+ case NV_DEVICE_V0_INFO:
+ return nouveau_devobj_info(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static u8
+nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
+{
+ return nv_rd08(object->engine, addr);
+}
+
+static u16
+nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
+{
+ return nv_rd16(object->engine, addr);
+}
+
+static u32
+nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
+{
+ return nv_rd32(object->engine, addr);
+}
+
+static void
+nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
+{
+ nv_wr08(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
+{
+ nv_wr16(object->engine, addr, data);
+}
+
+static void
+nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
+{
+ nv_wr32(object->engine, addr, data);
+}
+
+static int
+nouveau_devobj_map(struct nouveau_object *object, u64 *addr, u32 *size)
+{
+ struct nouveau_device *device = nv_device(object);
+ *addr = nv_device_resource_start(device, 0);
+ *size = nv_device_resource_len(device, 0);
+ return 0;
+}
+
static const u64 disable_map[] = {
- [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
- [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_BUS] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_SUBDEV_PWR] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_ENGINE_PERFMON] = NV_DEVICE_DISABLE_CORE,
- [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
- [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
- [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
- [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
- [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
- [NVDEV_ENGINE_VP] = NV_DEVICE_DISABLE_VP,
- [NVDEV_ENGINE_CRYPT] = NV_DEVICE_DISABLE_CRYPT,
- [NVDEV_ENGINE_BSP] = NV_DEVICE_DISABLE_BSP,
- [NVDEV_ENGINE_PPP] = NV_DEVICE_DISABLE_PPP,
- [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
- [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
- [NVDEV_ENGINE_VIC] = NV_DEVICE_DISABLE_VIC,
- [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
- [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
+ [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_V0_DISABLE_VBIOS,
+ [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_GPIO] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_I2C] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_MXM] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_MC] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_BUS] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_TIMER] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_FB] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_LTC] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_IBUS] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_VM] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_BAR] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_SUBDEV_PWR] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_ENGINE_PERFMON] = NV_DEVICE_V0_DISABLE_CORE,
+ [NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
+ [NVDEV_ENGINE_SW] = NV_DEVICE_V0_DISABLE_FIFO,
+ [NVDEV_ENGINE_GR] = NV_DEVICE_V0_DISABLE_GRAPH,
+ [NVDEV_ENGINE_MPEG] = NV_DEVICE_V0_DISABLE_MPEG,
+ [NVDEV_ENGINE_ME] = NV_DEVICE_V0_DISABLE_ME,
+ [NVDEV_ENGINE_VP] = NV_DEVICE_V0_DISABLE_VP,
+ [NVDEV_ENGINE_CRYPT] = NV_DEVICE_V0_DISABLE_CRYPT,
+ [NVDEV_ENGINE_BSP] = NV_DEVICE_V0_DISABLE_BSP,
+ [NVDEV_ENGINE_PPP] = NV_DEVICE_V0_DISABLE_PPP,
+ [NVDEV_ENGINE_COPY0] = NV_DEVICE_V0_DISABLE_COPY0,
+ [NVDEV_ENGINE_COPY1] = NV_DEVICE_V0_DISABLE_COPY1,
+ [NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
+ [NVDEV_ENGINE_VENC] = NV_DEVICE_V0_DISABLE_VENC,
+ [NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
[NVDEV_SUBDEV_NR] = 0,
};
+static void
+nouveau_devobj_dtor(struct nouveau_object *object)
+{
+ struct nouveau_devobj *devobj = (void *)object;
+ int i;
+
+ for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
+ nouveau_object_ref(NULL, &devobj->subdev[i]);
+
+ nouveau_parent_destroy(&devobj->base);
+}
+
+static struct nouveau_oclass
+nouveau_devobj_oclass_super = {
+ .handle = NV_DEVICE,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .dtor = nouveau_devobj_dtor,
+ .init = _nouveau_parent_init,
+ .fini = _nouveau_parent_fini,
+ .mthd = nouveau_devobj_mthd,
+ .map = nouveau_devobj_map,
+ .rd08 = nouveau_devobj_rd08,
+ .rd16 = nouveau_devobj_rd16,
+ .rd32 = nouveau_devobj_rd32,
+ .wr08 = nouveau_devobj_wr08,
+ .wr16 = nouveau_devobj_wr16,
+ .wr32 = nouveau_devobj_wr32,
+ }
+};
+
static int
nouveau_devobj_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv_device_v0 v0;
+ } *args = data;
struct nouveau_client *client = nv_client(parent);
struct nouveau_device *device;
struct nouveau_devobj *devobj;
- struct nv_device_class *args = data;
u32 boot0, strap;
u64 disable, mmio_base, mmio_size;
void __iomem *map;
int ret, i, c;
- if (size < sizeof(struct nv_device_class))
- return -EINVAL;
+ nv_ioctl(parent, "create device size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create device v%d device %016llx "
+ "disable %016llx debug0 %016llx\n",
+ args->v0.version, args->v0.device,
+ args->v0.disable, args->v0.debug0);
+ } else
+ return ret;
+
+ /* give privileged clients register access */
+ if (client->super)
+ oclass = &nouveau_devobj_oclass_super;
/* find the device subdev that matches what the client requested */
device = nv_device(client->device);
- if (args->device != ~0) {
- device = nouveau_device_find(args->device);
+ if (args->v0.device != ~0) {
+ device = nouveau_device_find(args->v0.device);
if (!device)
return -ENODEV;
}
@@ -135,14 +323,14 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
mmio_size = nv_device_resource_len(device, 0);
/* translate api disable mask into internal mapping */
- disable = args->debug0;
+ disable = args->v0.debug0;
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
- if (args->disable & disable_map[i])
+ if (args->v0.disable & disable_map[i])
disable |= (1ULL << i);
}
/* identify the chipset, and determine classes of subdev/engines */
- if (!(args->disable & NV_DEVICE_DISABLE_IDENTIFY) &&
+ if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY) &&
!device->card_type) {
map = ioremap(mmio_base, 0x102000);
if (map == NULL)
@@ -180,8 +368,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case 0x080:
case 0x090:
case 0x0a0: device->card_type = NV_50; break;
- case 0x0c0: device->card_type = NV_C0; break;
- case 0x0d0: device->card_type = NV_D0; break;
+ case 0x0c0:
+ case 0x0d0: device->card_type = NV_C0; break;
case 0x0e0:
case 0x0f0:
case 0x100: device->card_type = NV_E0; break;
@@ -206,8 +394,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
case NV_30: ret = nv30_identify(device); break;
case NV_40: ret = nv40_identify(device); break;
case NV_50: ret = nv50_identify(device); break;
- case NV_C0:
- case NV_D0: ret = nvc0_identify(device); break;
+ case NV_C0: ret = nvc0_identify(device); break;
case NV_E0: ret = nve0_identify(device); break;
case GM100: ret = gm100_identify(device); break;
default:
@@ -242,7 +429,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
}
- if (!(args->disable & NV_DEVICE_DISABLE_MMIO) &&
+ if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
!nv_subdev(device)->mmio) {
nv_subdev(device)->mmio = ioremap(mmio_base, mmio_size);
if (!nv_subdev(device)->mmio) {
@@ -298,71 +485,19 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
return 0;
}
-static void
-nouveau_devobj_dtor(struct nouveau_object *object)
-{
- struct nouveau_devobj *devobj = (void *)object;
- int i;
-
- for (i = NVDEV_SUBDEV_NR - 1; i >= 0; i--)
- nouveau_object_ref(NULL, &devobj->subdev[i]);
-
- nouveau_parent_destroy(&devobj->base);
-}
-
-static u8
-nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
-{
- return nv_rd08(object->engine, addr);
-}
-
-static u16
-nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
-{
- return nv_rd16(object->engine, addr);
-}
-
-static u32
-nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
-{
- return nv_rd32(object->engine, addr);
-}
-
-static void
-nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
-{
- nv_wr08(object->engine, addr, data);
-}
-
-static void
-nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
-{
- nv_wr16(object->engine, addr, data);
-}
-
-static void
-nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
-{
- nv_wr32(object->engine, addr, data);
-}
-
static struct nouveau_ofuncs
nouveau_devobj_ofuncs = {
.ctor = nouveau_devobj_ctor,
.dtor = nouveau_devobj_dtor,
.init = _nouveau_parent_init,
.fini = _nouveau_parent_fini,
- .rd08 = nouveau_devobj_rd08,
- .rd16 = nouveau_devobj_rd16,
- .rd32 = nouveau_devobj_rd32,
- .wr08 = nouveau_devobj_wr08,
- .wr16 = nouveau_devobj_wr16,
- .wr32 = nouveau_devobj_wr32,
+ .mthd = nouveau_devobj_mthd,
};
/******************************************************************************
* nouveau_device: engine functions
*****************************************************************************/
+
static struct nouveau_oclass
nouveau_device_sclass[] = {
{ 0x0080, &nouveau_devobj_ofuncs },
@@ -370,6 +505,23 @@ nouveau_device_sclass[] = {
};
static int
+nouveau_device_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ if (!WARN_ON(size != 0)) {
+ notify->size = 0;
+ notify->types = 1;
+ notify->index = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static const struct nvkm_event_func
+nouveau_device_event_func = {
+ .ctor = nouveau_device_event_ctor,
+};
+
+static int
nouveau_device_fini(struct nouveau_object *object, bool suspend)
{
struct nouveau_device *device = (void *)object;
@@ -386,7 +538,7 @@ nouveau_device_fini(struct nouveau_object *object, bool suspend)
}
}
- ret = 0;
+ ret = nvkm_acpi_fini(device, suspend);
fail:
for (; ret && i < NVDEV_SUBDEV_NR; i++) {
if ((subdev = device->subdev[i])) {
@@ -407,7 +559,11 @@ nouveau_device_init(struct nouveau_object *object)
{
struct nouveau_device *device = (void *)object;
struct nouveau_object *subdev;
- int ret, i;
+ int ret, i = 0;
+
+ ret = nvkm_acpi_init(device);
+ if (ret)
+ goto fail;
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
if ((subdev = device->subdev[i])) {
@@ -430,6 +586,8 @@ fail:
}
}
+ if (ret)
+ nvkm_acpi_fini(device, false);
return ret;
}
@@ -438,6 +596,8 @@ nouveau_device_dtor(struct nouveau_object *object)
{
struct nouveau_device *device = (void *)object;
+ nvkm_event_fini(&device->event);
+
mutex_lock(&nv_devices_mutex);
list_del(&device->head);
mutex_unlock(&nv_devices_mutex);
@@ -478,31 +638,6 @@ nv_device_resource_len(struct nouveau_device *device, unsigned int bar)
}
}
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page)
-{
- dma_addr_t ret;
-
- if (nv_device_is_pci(device)) {
- ret = pci_map_page(device->pdev, page, 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(device->pdev, ret))
- ret = 0;
- } else {
- ret = page_to_phys(page);
- }
-
- return ret;
-}
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr)
-{
- if (nv_device_is_pci(device))
- pci_unmap_page(device->pdev, addr, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
-}
-
int
nv_device_get_irq(struct nouveau_device *device, bool stall)
{
@@ -560,6 +695,9 @@ nouveau_device_create_(void *dev, enum nv_bus_type type, u64 name,
nv_subdev(device)->debug = nouveau_dbgopt(device->dbgopt, "DEVICE");
nv_engine(device)->sclass = nouveau_device_sclass;
list_add(&device->head, &nv_devices);
+
+ ret = nvkm_event_init(&nouveau_device_event_func, 1, 1,
+ &device->event);
done:
mutex_unlock(&nv_devices_mutex);
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
index 4b69bf56ed01..e34101a3490e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/ctrl.c
@@ -22,55 +22,82 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include <core/client.h>
#include <core/object.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
#include <subdev/clock.h>
#include "priv.h"
static int
-nouveau_control_mthd_pstate_info(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_control_mthd_pstate_info(struct nouveau_object *object,
+ void *data, u32 size)
{
+ union {
+ struct nvif_control_pstate_info_v0 v0;
+ } *args = data;
struct nouveau_clock *clk = nouveau_clock(object);
- struct nv_control_pstate_info *args = data;
+ int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(object, "control pstate info size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "control pstate info vers %d\n",
+ args->v0.version);
+ } else
+ return ret;
if (clk) {
- args->count = clk->state_nr;
- args->ustate = clk->ustate;
- args->pstate = clk->pstate;
+ args->v0.count = clk->state_nr;
+ args->v0.ustate_ac = clk->ustate_ac;
+ args->v0.ustate_dc = clk->ustate_dc;
+ args->v0.pwrsrc = clk->pwrsrc;
+ args->v0.pstate = clk->pstate;
} else {
- args->count = 0;
- args->ustate = NV_CONTROL_PSTATE_INFO_USTATE_DISABLE;
- args->pstate = NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN;
+ args->v0.count = 0;
+ args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
+ args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
+ args->v0.pwrsrc = -ENOSYS;
+ args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
}
return 0;
}
static int
-nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_control_mthd_pstate_attr(struct nouveau_object *object,
+ void *data, u32 size)
{
+ union {
+ struct nvif_control_pstate_attr_v0 v0;
+ } *args = data;
struct nouveau_clock *clk = nouveau_clock(object);
- struct nv_control_pstate_attr *args = data;
struct nouveau_clocks *domain;
struct nouveau_pstate *pstate;
struct nouveau_cstate *cstate;
int i = 0, j = -1;
u32 lo, hi;
-
- if ((size < sizeof(*args)) || !clk ||
- (args->state >= 0 && args->state >= clk->state_nr))
- return -EINVAL;
+ int ret;
+
+ nv_ioctl(object, "control pstate attr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "control pstate attr vers %d state %d "
+ "index %d\n",
+ args->v0.version, args->v0.state, args->v0.index);
+ if (!clk)
+ return -ENODEV;
+ if (args->v0.state < NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT)
+ return -EINVAL;
+ if (args->v0.state >= clk->state_nr)
+ return -EINVAL;
+ } else
+ return ret;
domain = clk->domains;
while (domain->name != nv_clk_src_max) {
- if (domain->mname && ++j == args->index)
+ if (domain->mname && ++j == args->v0.index)
break;
domain++;
}
@@ -78,9 +105,9 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
if (domain->name == nv_clk_src_max)
return -EINVAL;
- if (args->state != NV_CONTROL_PSTATE_ATTR_STATE_CURRENT) {
+ if (args->v0.state != NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT) {
list_for_each_entry(pstate, &clk->states, head) {
- if (i++ == args->state)
+ if (i++ == args->v0.state)
break;
}
@@ -91,21 +118,21 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
hi = max(hi, cstate->domain[domain->name]);
}
- args->state = pstate->pstate;
+ args->v0.state = pstate->pstate;
} else {
lo = max(clk->read(clk, domain->name), 0);
hi = lo;
}
- snprintf(args->name, sizeof(args->name), "%s", domain->mname);
- snprintf(args->unit, sizeof(args->unit), "MHz");
- args->min = lo / domain->mdiv;
- args->max = hi / domain->mdiv;
+ snprintf(args->v0.name, sizeof(args->v0.name), "%s", domain->mname);
+ snprintf(args->v0.unit, sizeof(args->v0.unit), "MHz");
+ args->v0.min = lo / domain->mdiv;
+ args->v0.max = hi / domain->mdiv;
- args->index = 0;
+ args->v0.index = 0;
while ((++domain)->name != nv_clk_src_max) {
if (domain->mname) {
- args->index = ++j;
+ args->v0.index = ++j;
break;
}
}
@@ -114,31 +141,65 @@ nouveau_control_mthd_pstate_attr(struct nouveau_object *object, u32 mthd,
}
static int
-nouveau_control_mthd_pstate_user(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_control_mthd_pstate_user(struct nouveau_object *object,
+ void *data, u32 size)
{
+ union {
+ struct nvif_control_pstate_user_v0 v0;
+ } *args = data;
struct nouveau_clock *clk = nouveau_clock(object);
- struct nv_control_pstate_user *args = data;
+ int ret;
+
+ nv_ioctl(object, "control pstate user size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "control pstate user vers %d ustate %d "
+ "pwrsrc %d\n", args->v0.version,
+ args->v0.ustate, args->v0.pwrsrc);
+ if (!clk)
+ return -ENODEV;
+ } else
+ return ret;
+
+ if (args->v0.pwrsrc >= 0) {
+ ret |= nouveau_clock_ustate(clk, args->v0.ustate, args->v0.pwrsrc);
+ } else {
+ ret |= nouveau_clock_ustate(clk, args->v0.ustate, 0);
+ ret |= nouveau_clock_ustate(clk, args->v0.ustate, 1);
+ }
- if (size < sizeof(*args) || !clk)
- return -EINVAL;
+ return ret;
+}
- return nouveau_clock_ustate(clk, args->state);
+static int
+nouveau_control_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ switch (mthd) {
+ case NVIF_CONTROL_PSTATE_INFO:
+ return nouveau_control_mthd_pstate_info(object, data, size);
+ case NVIF_CONTROL_PSTATE_ATTR:
+ return nouveau_control_mthd_pstate_attr(object, data, size);
+ case NVIF_CONTROL_PSTATE_USER:
+ return nouveau_control_mthd_pstate_user(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
}
+static struct nouveau_ofuncs
+nouveau_control_ofuncs = {
+ .ctor = _nouveau_object_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+ .mthd = nouveau_control_mthd,
+};
+
struct nouveau_oclass
nouveau_control_oclass[] = {
- { .handle = NV_CONTROL_CLASS,
- .ofuncs = &nouveau_object_ofuncs,
- .omthds = (struct nouveau_omthds[]) {
- { NV_CONTROL_PSTATE_INFO,
- NV_CONTROL_PSTATE_INFO, nouveau_control_mthd_pstate_info },
- { NV_CONTROL_PSTATE_ATTR,
- NV_CONTROL_PSTATE_ATTR, nouveau_control_mthd_pstate_attr },
- { NV_CONTROL_PSTATE_USER,
- NV_CONTROL_PSTATE_USER, nouveau_control_mthd_pstate_user },
- {},
- },
+ { .handle = NVIF_IOCTL_NEW_V0_CONTROL,
+ .ofuncs = &nouveau_control_ofuncs
},
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
index a520029e25d9..377ec0b8851e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -33,7 +33,7 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/ibus.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
@@ -68,20 +68,20 @@ gm100_identify(struct nouveau_device *device)
#endif
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = gm107_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gm107_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
#if 0
- device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
#endif
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
index 40b29d0214cb..573b55f5c2f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv04.c
@@ -56,7 +56,7 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
@@ -74,7 +74,7 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv04_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv04_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
index 5f7c25ff523d..183a85a6204e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv10.c
@@ -58,7 +58,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nv04_disp_oclass;
break;
@@ -75,7 +75,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -94,7 +94,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -113,7 +113,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -132,7 +132,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv10_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -151,7 +151,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -170,7 +170,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv1a_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
@@ -189,7 +189,7 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
index 75fed11bba0a..aa564c68a920 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv20.c
@@ -59,7 +59,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
@@ -78,7 +78,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
@@ -97,7 +97,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
@@ -116,7 +116,7 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv25_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
index 36919d7db7cc..11bd31da82ab 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv30.c
@@ -59,7 +59,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
@@ -78,7 +78,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv35_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
@@ -97,7 +97,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
@@ -117,7 +117,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv36_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
@@ -137,7 +137,7 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv17_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1130a62be2c7..e96c223cb797 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -65,7 +65,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -88,7 +88,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -111,7 +111,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -134,7 +134,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -157,7 +157,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -180,7 +180,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -203,7 +203,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -226,7 +226,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -249,7 +249,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -272,7 +272,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -295,7 +295,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -318,7 +318,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -341,7 +341,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -364,7 +364,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -387,7 +387,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
@@ -410,7 +410,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv04_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv40_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv10_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
index ef0b0bde1a91..932f84fae459 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
@@ -74,7 +74,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv50_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -99,7 +99,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -127,7 +127,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -155,7 +155,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -183,7 +183,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -211,7 +211,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -239,7 +239,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -267,7 +267,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -295,7 +295,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -323,7 +323,7 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -350,9 +350,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -380,9 +380,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -409,9 +409,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
@@ -438,9 +438,9 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nva3_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nva3_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nv50_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv84_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nv50_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d55ed633b19..b4a2917ce555 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -33,7 +33,7 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/ibus.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
@@ -70,14 +70,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc0_graph_oclass;
@@ -102,14 +102,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -134,14 +134,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -165,14 +165,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -197,14 +197,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc4_graph_oclass;
@@ -229,14 +229,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc1_graph_oclass;
@@ -260,14 +260,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvc0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvc0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvc0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvc8_graph_oclass;
@@ -292,14 +292,14 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvd9_graph_oclass;
@@ -323,12 +323,12 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gf100_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nvc0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nvc0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvd7_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 2d1e97d4264f..54ec53bc6252 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -33,7 +33,7 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/ibus.h>
#include <subdev/instmem.h>
#include <subdev/vm.h>
@@ -70,14 +70,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -103,14 +103,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -136,14 +136,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = gk104_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nve4_graph_oclass;
@@ -158,15 +158,16 @@ nve0_identify(struct nouveau_device *device)
break;
case 0xea:
device->cname = "GK20A";
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &gk20a_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
- device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &gk20a_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = gk20a_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
@@ -186,14 +187,14 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
@@ -219,17 +220,17 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nvd0_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nvd0_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nve0_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
- device->oclass[NVDEV_ENGINE_GR ] = nvf0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = gk110b_graph_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
@@ -248,18 +249,18 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
- device->oclass[NVDEV_SUBDEV_LTCG ] = gf100_ltcg_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
- device->oclass[NVDEV_SUBDEV_PWR ] = &nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
- device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
index 9c38c5e40500..22d55f6cde50 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -22,23 +22,93 @@
* Authors: Ben Skeggs
*/
+#include <core/os.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/event.h>
+
#include "priv.h"
#include "outp.h"
#include "conn.h"
+int
+nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ struct nouveau_disp *disp =
+ container_of(notify->event, typeof(*disp), vblank);
+ union {
+ struct nvif_notify_head_req_v0 v0;
+ } *req = data;
+ int ret;
+
+ if (nvif_unpack(req->v0, 0, 0, false)) {
+ notify->size = sizeof(struct nvif_notify_head_rep_v0);
+ if (ret = -ENXIO, req->v0.head <= disp->vblank.index_nr) {
+ notify->types = 1;
+ notify->index = req->v0.head;
+ return 0;
+ }
+ }
+
+ return ret;
+}
+
+void
+nouveau_disp_vblank(struct nouveau_disp *disp, int head)
+{
+ struct nvif_notify_head_rep_v0 rep = {};
+ nvkm_event_send(&disp->vblank, 1, head, &rep, sizeof(rep));
+}
+
static int
-nouveau_disp_hpd_check(struct nouveau_event *event, u32 types, int index)
+nouveau_disp_hpd_ctor(void *data, u32 size, struct nvkm_notify *notify)
{
- struct nouveau_disp *disp = event->priv;
+ struct nouveau_disp *disp =
+ container_of(notify->event, typeof(*disp), hpd);
+ union {
+ struct nvif_notify_conn_req_v0 v0;
+ } *req = data;
struct nvkm_output *outp;
- list_for_each_entry(outp, &disp->outp, head) {
- if (outp->conn->index == index) {
- if (outp->conn->hpd.event)
- return 0;
- break;
+ int ret;
+
+ if (nvif_unpack(req->v0, 0, 0, false)) {
+ notify->size = sizeof(struct nvif_notify_conn_rep_v0);
+ list_for_each_entry(outp, &disp->outp, head) {
+ if (ret = -ENXIO, outp->conn->index == req->v0.conn) {
+ if (ret = -ENODEV, outp->conn->hpd.event) {
+ notify->types = req->v0.mask;
+ notify->index = req->v0.conn;
+ ret = 0;
+ }
+ break;
+ }
}
}
- return -ENOSYS;
+
+ return ret;
+}
+
+static const struct nvkm_event_func
+nouveau_disp_hpd_func = {
+ .ctor = nouveau_disp_hpd_ctor
+};
+
+int
+nouveau_disp_ntfy(struct nouveau_object *object, u32 type,
+ struct nvkm_event **event)
+{
+ struct nouveau_disp *disp = (void *)object->engine;
+ switch (type) {
+ case NV04_DISP_NTFY_VBLANK:
+ *event = &disp->vblank;
+ return 0;
+ case NV04_DISP_NTFY_CONN:
+ *event = &disp->hpd;
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
}
int
@@ -97,7 +167,8 @@ _nouveau_disp_dtor(struct nouveau_object *object)
struct nouveau_disp *disp = (void *)object;
struct nvkm_output *outp, *outt;
- nouveau_event_destroy(&disp->vblank);
+ nvkm_event_fini(&disp->vblank);
+ nvkm_event_fini(&disp->hpd);
if (disp->outp.next) {
list_for_each_entry_safe(outp, outt, &disp->outp, head) {
@@ -157,14 +228,11 @@ nouveau_disp_create_(struct nouveau_object *parent,
hpd = max(hpd, (u8)(dcbE.connector + 1));
}
- ret = nouveau_event_create(3, hpd, &disp->hpd);
+ ret = nvkm_event_init(&nouveau_disp_hpd_func, 3, hpd, &disp->hpd);
if (ret)
return ret;
- disp->hpd->priv = disp;
- disp->hpd->check = nouveau_disp_hpd_check;
-
- ret = nouveau_event_create(1, heads, &disp->vblank);
+ ret = nvkm_event_init(impl->vblank, 1, heads, &disp->vblank);
if (ret)
return ret;
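The base.c changes above move hotplug and vblank notification onto the nvkm_event/nvkm_notify interface, where each ctor validates a versioned nvif request with nvif_unpack(). Below is a minimal sketch of that idiom, not part of the patch: the ctor name and request layout are hypothetical, and nvif_unpack() is assumed to behave as in <nvif/unpack.h>, assigning the local ret and consuming size as side effects while evaluating true on success.

static int
my_notify_ctor(void *data, u32 size, struct nvkm_notify *notify)
{
	union {
		struct {
			u8 version;	/* checked by nvif_unpack() */
			u8 index;	/* which head/connector to watch */
			u8 pad02[6];
		} v0;			/* hypothetical request layout */
	} *req = data;
	int ret;

	if (nvif_unpack(req->v0, 0, 0, false)) {
		notify->size  = 0;		/* no reply payload in this sketch */
		notify->types = 1;		/* single event type */
		notify->index = req->v0.index;
		return 0;
	}

	return ret;	/* error code left behind by nvif_unpack() */
}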
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
index 4ffbc70ecf5a..3d1070228977 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.c
@@ -22,39 +22,41 @@
* Authors: Ben Skeggs
*/
+#include <core/os.h>
+#include <nvif/event.h>
+
#include <subdev/gpio.h>
#include "conn.h"
#include "outp.h"
-static void
-nvkm_connector_hpd_work(struct work_struct *w)
+static int
+nvkm_connector_hpd(struct nvkm_notify *notify)
{
- struct nvkm_connector *conn = container_of(w, typeof(*conn), hpd.work);
+ struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
struct nouveau_disp *disp = nouveau_disp(conn);
struct nouveau_gpio *gpio = nouveau_gpio(conn);
- u32 send = NVKM_HPD_UNPLUG;
- if (gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.event->index))
- send = NVKM_HPD_PLUG;
- nouveau_event_trigger(disp->hpd, send, conn->index);
- nouveau_event_get(conn->hpd.event);
-}
+ const struct nvkm_gpio_ntfy_rep *line = notify->data;
+ struct nvif_notify_conn_rep_v0 rep;
+ int index = conn->index;
-static int
-nvkm_connector_hpd(void *data, u32 type, int index)
-{
- struct nvkm_connector *conn = data;
- DBG("HPD: %d\n", type);
- schedule_work(&conn->hpd.work);
- return NVKM_EVENT_DROP;
+ DBG("HPD: %d\n", line->mask);
+
+ if (!gpio->get(gpio, 0, DCB_GPIO_UNUSED, conn->hpd.index))
+ rep.mask = NVIF_NOTIFY_CONN_V0_UNPLUG;
+ else
+ rep.mask = NVIF_NOTIFY_CONN_V0_PLUG;
+ rep.version = 0;
+
+ nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
+ return NVKM_NOTIFY_KEEP;
}
int
_nvkm_connector_fini(struct nouveau_object *object, bool suspend)
{
struct nvkm_connector *conn = (void *)object;
- if (conn->hpd.event)
- nouveau_event_put(conn->hpd.event);
+ nvkm_notify_put(&conn->hpd);
return nouveau_object_fini(&conn->base, suspend);
}
@@ -63,10 +65,8 @@ _nvkm_connector_init(struct nouveau_object *object)
{
struct nvkm_connector *conn = (void *)object;
int ret = nouveau_object_init(&conn->base);
- if (ret == 0) {
- if (conn->hpd.event)
- nouveau_event_get(conn->hpd.event);
- }
+ if (ret == 0)
+ nvkm_notify_get(&conn->hpd);
return ret;
}
@@ -74,7 +74,7 @@ void
_nvkm_connector_dtor(struct nouveau_object *object)
{
struct nvkm_connector *conn = (void *)object;
- nouveau_event_ref(NULL, &conn->hpd.event);
+ nvkm_notify_fini(&conn->hpd);
nouveau_object_destroy(&conn->base);
}
@@ -116,19 +116,24 @@ nvkm_connector_create_(struct nouveau_object *parent,
if ((info->hpd = ffs(info->hpd))) {
if (--info->hpd >= ARRAY_SIZE(hpd)) {
ERR("hpd %02x unknown\n", info->hpd);
- goto done;
+ return 0;
}
info->hpd = hpd[info->hpd];
ret = gpio->find(gpio, 0, info->hpd, DCB_GPIO_UNUSED, &func);
if (ret) {
ERR("func %02x lookup failed, %d\n", info->hpd, ret);
- goto done;
+ return 0;
}
- ret = nouveau_event_new(gpio->events, NVKM_GPIO_TOGGLED,
- func.line, nvkm_connector_hpd,
- conn, &conn->hpd.event);
+ ret = nvkm_notify_init(&gpio->event, nvkm_connector_hpd, true,
+ &(struct nvkm_gpio_ntfy_req) {
+ .mask = NVKM_GPIO_TOGGLED,
+ .line = func.line,
+ },
+ sizeof(struct nvkm_gpio_ntfy_req),
+ sizeof(struct nvkm_gpio_ntfy_rep),
+ &conn->hpd);
if (ret) {
ERR("func %02x failed, %d\n", info->hpd, ret);
} else {
@@ -136,8 +141,6 @@ nvkm_connector_create_(struct nouveau_object *parent,
}
}
-done:
- INIT_WORK(&conn->hpd.work, nvkm_connector_hpd_work);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
index 035ebeacbb1c..55e5f5c82c14 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/conn.h
@@ -10,10 +10,7 @@ struct nvkm_connector {
struct nvbios_connE info;
int index;
- struct {
- struct nouveau_eventh *event;
- struct work_struct work;
- } hpd;
+ struct nvkm_notify hpd;
};
#define nvkm_connector_create(p,e,c,b,i,d) \
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index a66b27c0fcab..b36addff06a9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -32,13 +33,28 @@
#include "nv50.h"
int
-nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
+nv50_dac_power(NV50_DISP_MTHD_V1)
{
- const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
- (data & NV50_DISP_DAC_PWR_VSYNC) |
- (data & NV50_DISP_DAC_PWR_DATA) |
- (data & NV50_DISP_DAC_PWR_STATE);
- const u32 doff = (or * 0x800);
+ const u32 doff = outp->or * 0x800;
+ union {
+ struct nv50_disp_dac_pwr_v0 v0;
+ } *args = data;
+ u32 stat;
+ int ret;
+
+ nv_ioctl(object, "disp dac pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp dac pwr vers %d state %d data %d "
+ "vsync %d hsync %d\n",
+ args->v0.version, args->v0.state, args->v0.data,
+ args->v0.vsync, args->v0.hsync);
+ stat = 0x00000040 * !args->v0.state;
+ stat |= 0x00000010 * !args->v0.data;
+ stat |= 0x00000004 * !args->v0.vsync;
+ stat |= 0x00000001 * !args->v0.hsync;
+ } else
+ return ret;
+
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
@@ -46,9 +62,24 @@ nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
}
int
-nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
+nv50_dac_sense(NV50_DISP_MTHD_V1)
{
- const u32 doff = (or * 0x800);
+ union {
+ struct nv50_disp_dac_load_v0 v0;
+ } *args = data;
+ const u32 doff = outp->or * 0x800;
+ u32 loadval;
+ int ret;
+
+ nv_ioctl(object, "disp dac load size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp dac load vers %d data %08x\n",
+ args->v0.version, args->v0.data);
+ if (args->v0.data & 0xfff00000)
+ return -EINVAL;
+ loadval = args->v0.data;
+ } else
+ return ret;
nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
@@ -61,38 +92,10 @@ nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
- nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval);
+ nv_debug(priv, "DAC%d sense: 0x%08x\n", outp->or, loadval);
if (!(loadval & 0x80000000))
return -ETIMEDOUT;
- return (loadval & 0x38000000) >> 27;
-}
-
-int
-nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
- u32 *data = args;
- int ret;
-
- if (size < sizeof(u32))
- return -EINVAL;
-
- switch (mthd & ~0x3f) {
- case NV50_DISP_DAC_PWR:
- ret = priv->dac.power(priv, or, data[0]);
- break;
- case NV50_DISP_DAC_LOAD:
- ret = priv->dac.sense(priv, or, data[0]);
- if (ret >= 0) {
- data[0] = ret;
- ret = 0;
- }
- break;
- default:
- BUG_ON(1);
- }
-
- return ret;
+ args->v0.load = (loadval & 0x38000000) >> 27;
+ return 0;
}
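With the nv50_dac_sense() conversion above, the 3-bit load status is now returned in args->v0.load rather than as the method's return value. A short worked example of the decode, using an arbitrary register value chosen only for illustration:

	u32 loadval = 0xb8000000;	/* example only: bit 31 set, sense complete */
	if (!(loadval & 0x80000000))
		return -ETIMEDOUT;
	args->v0.load = (loadval & 0x38000000) >> 27;	/* here: 0x38000000 >> 27 == 7 */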
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 5a5b59b21130..39890221b91c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -30,7 +30,7 @@
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "dport.h"
#include "outpdp.h"
@@ -335,7 +335,7 @@ nouveau_dp_train(struct work_struct *w)
int ret;
/* bring capabilities within encoder limits */
- if (nv_mclass(disp) < NVD0_DISP_CLASS)
+ if (nv_mclass(disp) < GF110_DISP)
outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
@@ -354,7 +354,7 @@ nouveau_dp_train(struct work_struct *w)
cfg--;
/* disable link interrupt handling during link training */
- nouveau_event_put(outp->irq);
+ nvkm_notify_put(&outp->irq);
/* enable down-spreading and execute pre-train script from vbios */
dp_link_train_init(dp, outp->dpcd[3] & 0x01);
@@ -395,5 +395,5 @@ nouveau_dp_train(struct work_struct *w)
DBG("training complete\n");
atomic_set(&outp->lt.done, 1);
wake_up(&outp->lt.wait);
- nouveau_event_get(outp->irq);
+ nvkm_notify_get(&outp->irq);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
index 9fc7447fec90..d54da8b5f87e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -35,17 +35,17 @@
static struct nouveau_oclass
gm107_disp_sclass[] = {
- { GM107_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
- { GM107_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
- { GM107_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
- { GM107_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
- { GM107_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ { GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
{}
};
static struct nouveau_oclass
gm107_disp_base_oclass[] = {
- { GM107_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ { GM107_DISP, &nvd0_disp_base_ofuncs },
{}
};
@@ -93,9 +93,11 @@ gm107_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nve0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index a19e7d79b847..8b4e06abe533 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -22,25 +22,37 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include "nv50.h"
int
-nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+nva3_hda_eld(NV50_DISP_MTHD_V1)
{
- const u32 soff = (or * 0x800);
- int i;
+ union {
+ struct nv50_disp_sor_hda_eld_v0 v0;
+ } *args = data;
+ const u32 soff = outp->or * 0x800;
+ int ret, i;
- if (data && data[0]) {
+ nv_ioctl(object, "disp sor hda eld size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+ if (size > 0x60)
+ return -E2BIG;
+ } else
+ return ret;
+
+ if (size && args->v0.data[0]) {
for (i = 0; i < size; i++)
- nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ nv_wr32(priv, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
for (; i < 0x60; i++)
nv_wr32(priv, 0x61c440 + soff, (i << 8));
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
} else
- if (data) {
+ if (size) {
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
} else {
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index 717639386ced..baf558fc12fb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -33,19 +34,30 @@
#include "nv50.h"
int
-nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
+nvd0_hda_eld(NV50_DISP_MTHD_V1)
{
- const u32 soff = (or * 0x030);
- int i;
+ union {
+ struct nv50_disp_sor_hda_eld_v0 v0;
+ } *args = data;
+ const u32 soff = outp->or * 0x030;
+ int ret, i;
- if (data && data[0]) {
+ nv_ioctl(object, "disp sor hda eld size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "disp sor hda eld vers %d\n", args->v0.version);
+ if (size > 0x60)
+ return -E2BIG;
+ } else
+ return ret;
+
+ if (size && args->v0.data[0]) {
for (i = 0; i < size; i++)
- nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8) | args->v0.data[i]);
for (; i < 0x60; i++)
nv_wr32(priv, 0x10ec00 + soff, (i << 8));
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
} else
- if (data) {
+ if (size) {
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
} else {
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
index 7fdade6e604d..fa276dede9cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -22,17 +22,38 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include "nv50.h"
int
-nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+nv84_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
const u32 hoff = (head * 0x800);
+ union {
+ struct nv50_disp_sor_hdmi_pwr_v0 v0;
+ } *args = data;
+ u32 ctrl;
+ int ret;
- if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
+ if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
+ return -EINVAL;
+ ctrl = 0x40000000 * !!args->v0.state;
+ ctrl |= args->v0.max_ac_packet << 16;
+ ctrl |= args->v0.rekey;
+ ctrl |= 0x1f000000; /* ??? */
+ } else
+ return ret;
+
+ if (!(ctrl & 0x40000000)) {
nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
@@ -65,6 +86,6 @@ nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
/* HDMI_CTRL */
- nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, ctrl);
return 0;
}
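The nv84_hdmi_ctrl() conversion above assembles the register value from the unpacked method arguments instead of accepting a raw data word. A worked example with arbitrary argument values (the 0x1f000000 bits remain marked "???" in the patch itself):

	u32 ctrl = 0x40000000 * !!1;	/* state = on -> 0x40000000 */
	ctrl |= 0x10 << 16;		/* max_ac_packet = 0x10 (must be <= 0x1f) */
	ctrl |= 0x38;			/* rekey = 0x38 (must be <= 0x7f) */
	ctrl |= 0x1f000000;		/* unknown bits carried over from the old code */
	/* ctrl == 0x5f100038; written with mask 0x5f1f007f at 0x6165a4 + hoff */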
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
index db8c6fd46278..57eeed1d1942 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -22,17 +22,38 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include "nv50.h"
int
-nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+nva3_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
- const u32 soff = (or * 0x800);
+ const u32 soff = outp->or * 0x800;
+ union {
+ struct nv50_disp_sor_hdmi_pwr_v0 v0;
+ } *args = data;
+ u32 ctrl;
+ int ret;
- if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
+ if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
+ return -EINVAL;
+ ctrl = 0x40000000 * !!args->v0.state;
+ ctrl |= args->v0.max_ac_packet << 16;
+ ctrl |= args->v0.rekey;
+ ctrl |= 0x1f000000; /* ??? */
+ } else
+ return ret;
+
+ if (!(ctrl & 0x40000000)) {
nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
@@ -65,6 +86,6 @@ nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
/* HDMI_CTRL */
- nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
+ nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, ctrl);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
index 5151bb261832..3106d295b48d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -22,17 +22,37 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include "nv50.h"
int
-nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
+nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
const u32 hoff = (head * 0x800);
+ union {
+ struct nv50_disp_sor_hdmi_pwr_v0 v0;
+ } *args = data;
+ u32 ctrl;
+ int ret;
- if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
+ nv_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
+ "max_ac_packet %d rekey %d\n",
+ args->v0.version, args->v0.state,
+ args->v0.max_ac_packet, args->v0.rekey);
+ if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
+ return -EINVAL;
+ ctrl = 0x40000000 * !!args->v0.state;
+ ctrl |= args->v0.max_ac_packet << 16;
+ ctrl |= args->v0.rekey;
+ } else
+ return ret;
+
+ if (!(ctrl & 0x40000000)) {
nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
@@ -54,7 +74,7 @@ nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
/* HDMI_CTRL */
- nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
+ nv_mask(priv, 0x616798 + hoff, 0x401f007f, ctrl);
/* NFI, audio doesn't work without it though.. */
nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index a32666ed0c47..366f315fc9a5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -24,60 +24,100 @@
#include "priv.h"
+#include <core/client.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
struct nv04_disp_priv {
struct nouveau_disp base;
};
static int
-nv04_disp_scanoutpos(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nv04_disp_scanoutpos(struct nouveau_object *object, struct nv04_disp_priv *priv,
+ void *data, u32 size, int head)
{
- struct nv04_disp_priv *priv = (void *)object->engine;
- struct nv04_display_scanoutpos *args = data;
- const int head = (mthd & NV04_DISP_MTHD_HEAD);
+ const u32 hoff = head * 0x2000;
+ union {
+ struct nv04_disp_scanoutpos_v0 v0;
+ } *args = data;
u32 line;
+ int ret;
+
+ nv_ioctl(object, "disp scanoutpos size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
+ args->v0.vblanks = nv_rd32(priv, 0x680800 + hoff) & 0xffff;
+ args->v0.vtotal = nv_rd32(priv, 0x680804 + hoff) & 0xffff;
+ args->v0.vblanke = args->v0.vtotal - 1;
+
+ args->v0.hblanks = nv_rd32(priv, 0x680820 + hoff) & 0xffff;
+ args->v0.htotal = nv_rd32(priv, 0x680824 + hoff) & 0xffff;
+ args->v0.hblanke = args->v0.htotal - 1;
+
+ /*
+ * If output is vga instead of digital then vtotal/htotal is
+ * invalid so we have to give up and trigger the timestamping
+ * fallback in the drm core.
+ */
+ if (!args->v0.vtotal || !args->v0.htotal)
+ return -ENOTSUPP;
+
+ args->v0.time[0] = ktime_to_ns(ktime_get());
+ line = nv_rd32(priv, 0x600868 + hoff);
+ args->v0.time[1] = ktime_to_ns(ktime_get());
+ args->v0.hline = (line & 0xffff0000) >> 16;
+ args->v0.vline = (line & 0x0000ffff);
+ } else
+ return ret;
- if (size < sizeof(*args))
- return -EINVAL;
-
- args->vblanks = nv_rd32(priv, 0x680800 + (head * 0x2000)) & 0xffff;
- args->vtotal = nv_rd32(priv, 0x680804 + (head * 0x2000)) & 0xffff;
- args->vblanke = args->vtotal - 1;
-
- args->hblanks = nv_rd32(priv, 0x680820 + (head * 0x2000)) & 0xffff;
- args->htotal = nv_rd32(priv, 0x680824 + (head * 0x2000)) & 0xffff;
- args->hblanke = args->htotal - 1;
-
- /*
- * If output is vga instead of digital then vtotal/htotal is invalid
- * so we have to give up and trigger the timestamping fallback in the
- * drm core.
- */
- if (!args->vtotal || !args->htotal)
- return -ENOTSUPP;
-
- args->time[0] = ktime_to_ns(ktime_get());
- line = nv_rd32(priv, 0x600868 + (head * 0x2000));
- args->time[1] = ktime_to_ns(ktime_get());
- args->hline = (line & 0xffff0000) >> 16;
- args->vline = (line & 0x0000ffff);
return 0;
}
-#define HEAD_MTHD(n) (n), (n) + 0x01
+static int
+nv04_disp_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size)
+{
+ union {
+ struct nv04_disp_mthd_v0 v0;
+ } *args = data;
+ struct nv04_disp_priv *priv = (void *)object->engine;
+ int head, ret;
+
+ nv_ioctl(object, "disp mthd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+ args->v0.version, args->v0.method, args->v0.head);
+ mthd = args->v0.method;
+ head = args->v0.head;
+ } else
+ return ret;
-static struct nouveau_omthds
-nv04_disp_omthds[] = {
- { HEAD_MTHD(NV04_DISP_SCANOUTPOS), nv04_disp_scanoutpos },
- {}
+ if (head < 0 || head >= 2)
+ return -ENXIO;
+
+ switch (mthd) {
+ case NV04_DISP_SCANOUTPOS:
+ return nv04_disp_scanoutpos(object, priv, data, size, head);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static struct nouveau_ofuncs
+nv04_disp_ofuncs = {
+ .ctor = _nouveau_object_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+ .mthd = nv04_disp_mthd,
+ .ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
nv04_disp_sclass[] = {
- { NV04_DISP_CLASS, &nouveau_object_ofuncs, nv04_disp_omthds },
+ { NV04_DISP, &nv04_disp_ofuncs },
{},
};
@@ -86,17 +126,26 @@ nv04_disp_sclass[] = {
******************************************************************************/
static void
-nv04_disp_vblank_enable(struct nouveau_event *event, int type, int head)
+nv04_disp_vblank_init(struct nvkm_event *event, int type, int head)
{
- nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001);
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000001);
}
static void
-nv04_disp_vblank_disable(struct nouveau_event *event, int type, int head)
+nv04_disp_vblank_fini(struct nvkm_event *event, int type, int head)
{
- nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000);
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_wr32(disp, 0x600140 + (head * 0x2000) , 0x00000000);
}
+static const struct nvkm_event_func
+nv04_disp_vblank_func = {
+ .ctor = nouveau_disp_vblank_ctor,
+ .init = nv04_disp_vblank_init,
+ .fini = nv04_disp_vblank_fini,
+};
+
static void
nv04_disp_intr(struct nouveau_subdev *subdev)
{
@@ -106,12 +155,12 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
u32 pvideo;
if (crtc0 & 0x00000001) {
- nouveau_event_trigger(priv->base.vblank, 1, 0);
+ nouveau_disp_vblank(&priv->base, 0);
nv_wr32(priv, 0x600100, 0x00000001);
}
if (crtc1 & 0x00000001) {
- nouveau_event_trigger(priv->base.vblank, 1, 1);
+ nouveau_disp_vblank(&priv->base, 1);
nv_wr32(priv, 0x602100, 0x00000001);
}
@@ -140,9 +189,6 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
nv_engine(priv)->sclass = nv04_disp_sclass;
nv_subdev(priv)->intr = nv04_disp_intr;
- priv->base.vblank->priv = priv;
- priv->base.vblank->enable = nv04_disp_vblank_enable;
- priv->base.vblank->disable = nv04_disp_vblank_disable;
return 0;
}
@@ -155,4 +201,5 @@ nv04_disp_oclass = &(struct nouveau_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .vblank = &nv04_disp_vblank_func,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 2283c442a10d..4b5bb5d58a54 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -23,10 +23,12 @@
*/
#include <core/object.h>
+#include <core/client.h>
#include <core/parent.h>
#include <core/handle.h>
-#include <core/class.h>
#include <core/enum.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -43,14 +45,16 @@
* EVO channel base class
******************************************************************************/
-int
+static int
nv50_disp_chan_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int chid,
+ struct nouveau_oclass *oclass, int head,
int length, void **pobject)
{
+ const struct nv50_disp_chan_impl *impl = (void *)oclass->ofuncs;
struct nv50_disp_base *base = (void *)parent;
struct nv50_disp_chan *chan;
+ int chid = impl->chid + head;
int ret;
if (base->chan & (1 << chid))
@@ -63,12 +67,14 @@ nv50_disp_chan_create_(struct nouveau_object *parent,
chan = *pobject;
if (ret)
return ret;
-
chan->chid = chid;
+
+ nv_parent(chan)->object_attach = impl->attach;
+ nv_parent(chan)->object_detach = impl->detach;
return 0;
}
-void
+static void
nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
{
struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
@@ -76,6 +82,16 @@ nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
nouveau_namedb_destroy(&chan->base);
}
+int
+nv50_disp_chan_map(struct nouveau_object *object, u64 *addr, u32 *size)
+{
+ struct nv50_disp_chan *chan = (void *)object;
+ *addr = nv_device_resource_start(nv_device(object), 0) +
+ 0x640000 + (chan->chid * 0x1000);
+ *size = 0x001000;
+ return 0;
+}
+
u32
nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
{
@@ -115,16 +131,16 @@ nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
nouveau_ramht_remove(base->ramht, cookie);
}
-int
+static int
nv50_disp_dmac_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, u32 pushbuf, int chid,
+ struct nouveau_oclass *oclass, u32 pushbuf, int head,
int length, void **pobject)
{
struct nv50_disp_dmac *dmac;
int ret;
- ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
+ ret = nv50_disp_chan_create_(parent, engine, oclass, head,
length, pobject);
dmac = *pobject;
if (ret)
@@ -397,27 +413,32 @@ nv50_disp_mast_mthd_chan = {
}
};
-static int
+int
nv50_disp_mast_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_mast_class *args = data;
+ union {
+ struct nv50_disp_core_channel_dma_v0 v0;
+ } *args = data;
struct nv50_disp_dmac *mast;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create disp core channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp core channel dma vers %d "
+ "pushbuf %08x\n",
+ args->v0.version, args->v0.pushbuf);
+ } else
+ return ret;
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
0, sizeof(*mast), (void **)&mast);
*pobject = nv_object(mast);
if (ret)
return ret;
- nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
- nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
return 0;
}
@@ -479,14 +500,18 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
return nv50_disp_chan_fini(&mast->base, suspend);
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_mast_ofuncs = {
- .ctor = nv50_disp_mast_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nv50_disp_mast_init,
- .fini = nv50_disp_mast_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_mast_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nv50_disp_mast_init,
+ .base.fini = nv50_disp_mast_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 0,
+ .attach = nv50_disp_dmac_object_attach,
+ .detach = nv50_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -543,39 +568,51 @@ nv50_disp_sync_mthd_chan = {
}
};
-static int
+int
nv50_disp_sync_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_sync_class *args = data;
+ union {
+ struct nv50_disp_base_channel_dma_v0 v0;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*args) || args->head > 1)
- return -EINVAL;
+ nv_ioctl(parent, "create disp base channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp base channel dma vers %d "
+ "pushbuf %08x head %d\n",
+ args->v0.version, args->v0.pushbuf, args->v0.head);
+ if (args->v0.head > priv->head.nr)
+ return -EINVAL;
+ } else
+ return ret;
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 1 + args->head, sizeof(*dmac),
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
+ args->v0.head, sizeof(*dmac),
(void **)&dmac);
*pobject = nv_object(dmac);
if (ret)
return ret;
- nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
- nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
return 0;
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_sync_ofuncs = {
- .ctor = nv50_disp_sync_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nv50_disp_dmac_init,
- .fini = nv50_disp_dmac_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_sync_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nv50_disp_dmac_init,
+ .base.fini = nv50_disp_dmac_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 1,
+ .attach = nv50_disp_dmac_object_attach,
+ .detach = nv50_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -620,39 +657,51 @@ nv50_disp_ovly_mthd_chan = {
}
};
-static int
+int
nv50_disp_ovly_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_ovly_class *args = data;
+ union {
+ struct nv50_disp_overlay_channel_dma_v0 v0;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
struct nv50_disp_dmac *dmac;
int ret;
- if (size < sizeof(*args) || args->head > 1)
- return -EINVAL;
+ nv_ioctl(parent, "create disp overlay channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp overlay channel dma vers %d "
+ "pushbuf %08x head %d\n",
+ args->v0.version, args->v0.pushbuf, args->v0.head);
+ if (args->v0.head > priv->head.nr)
+ return -EINVAL;
+ } else
+ return ret;
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 3 + args->head, sizeof(*dmac),
+ ret = nv50_disp_dmac_create_(parent, engine, oclass, args->v0.pushbuf,
+ args->v0.head, sizeof(*dmac),
(void **)&dmac);
*pobject = nv_object(dmac);
if (ret)
return ret;
- nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
- nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
return 0;
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_ovly_ofuncs = {
- .ctor = nv50_disp_ovly_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nv50_disp_dmac_init,
- .fini = nv50_disp_dmac_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_ovly_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nv50_disp_dmac_init,
+ .base.fini = nv50_disp_dmac_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 3,
+ .attach = nv50_disp_dmac_object_attach,
+ .detach = nv50_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -662,14 +711,14 @@ nv50_disp_ovly_ofuncs = {
static int
nv50_disp_pioc_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int chid,
+ struct nouveau_oclass *oclass, int head,
int length, void **pobject)
{
- return nv50_disp_chan_create_(parent, engine, oclass, chid,
+ return nv50_disp_chan_create_(parent, engine, oclass, head,
length, pobject);
}
-static void
+void
nv50_disp_pioc_dtor(struct nouveau_object *object)
{
struct nv50_disp_pioc *pioc = (void *)object;
@@ -727,20 +776,29 @@ nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
* EVO immediate overlay channel objects
******************************************************************************/
-static int
+int
nv50_disp_oimm_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_oimm_class *args = data;
+ union {
+ struct nv50_disp_overlay_v0 v0;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
struct nv50_disp_pioc *pioc;
int ret;
- if (size < sizeof(*args) || args->head > 1)
- return -EINVAL;
+ nv_ioctl(parent, "create disp overlay size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp overlay vers %d head %d\n",
+ args->v0.version, args->v0.head);
+ if (args->v0.head > priv->head.nr)
+ return -EINVAL;
+ } else
+ return ret;
- ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
sizeof(*pioc), (void **)&pioc);
*pobject = nv_object(pioc);
if (ret)
@@ -749,34 +807,45 @@ nv50_disp_oimm_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_oimm_ofuncs = {
- .ctor = nv50_disp_oimm_ctor,
- .dtor = nv50_disp_pioc_dtor,
- .init = nv50_disp_pioc_init,
- .fini = nv50_disp_pioc_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_oimm_ctor,
+ .base.dtor = nv50_disp_pioc_dtor,
+ .base.init = nv50_disp_pioc_init,
+ .base.fini = nv50_disp_pioc_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 5,
};
/*******************************************************************************
* EVO cursor channel objects
******************************************************************************/
-static int
+int
nv50_disp_curs_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_display_curs_class *args = data;
+ union {
+ struct nv50_disp_cursor_v0 v0;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)engine;
struct nv50_disp_pioc *pioc;
int ret;
- if (size < sizeof(*args) || args->head > 1)
- return -EINVAL;
+ nv_ioctl(parent, "create disp cursor size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create disp cursor vers %d head %d\n",
+ args->v0.version, args->v0.head);
+ if (args->v0.head > priv->head.nr)
+ return -EINVAL;
+ } else
+ return ret;
- ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
+ ret = nv50_disp_pioc_create_(parent, engine, oclass, args->v0.head,
sizeof(*pioc), (void **)&pioc);
*pobject = nv_object(pioc);
if (ret)
@@ -785,14 +854,16 @@ nv50_disp_curs_ctor(struct nouveau_object *parent,
return 0;
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nv50_disp_curs_ofuncs = {
- .ctor = nv50_disp_curs_ctor,
- .dtor = nv50_disp_pioc_dtor,
- .init = nv50_disp_pioc_init,
- .fini = nv50_disp_pioc_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_curs_ctor,
+ .base.dtor = nv50_disp_pioc_dtor,
+ .base.init = nv50_disp_pioc_init,
+ .base.fini = nv50_disp_pioc_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 7,
};
/*******************************************************************************
@@ -800,47 +871,162 @@ nv50_disp_curs_ofuncs = {
******************************************************************************/
int
-nv50_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv04_display_scanoutpos *args = data;
- const int head = (mthd & NV50_DISP_MTHD_HEAD);
- u32 blanke, blanks, total;
+ const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
+ const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
+ const u32 total = nv_rd32(priv, 0x610afc + (head * 0x540));
+ union {
+ struct nv04_disp_scanoutpos_v0 v0;
+ } *args = data;
+ int ret;
+
+ nv_ioctl(object, "disp scanoutpos size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
+ args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+ args->v0.hblanke = (blanke & 0x0000ffff);
+ args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+ args->v0.hblanks = (blanks & 0x0000ffff);
+ args->v0.vtotal = ( total & 0xffff0000) >> 16;
+ args->v0.htotal = ( total & 0x0000ffff);
+ args->v0.time[0] = ktime_to_ns(ktime_get());
+ args->v0.vline = /* vline read locks hline */
+ nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->v0.time[1] = ktime_to_ns(ktime_get());
+ args->v0.hline =
+ nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ } else
+ return ret;
- if (size < sizeof(*args) || head >= priv->head.nr)
- return -EINVAL;
- blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
- blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
- total = nv_rd32(priv, 0x610afc + (head * 0x540));
-
- args->vblanke = (blanke & 0xffff0000) >> 16;
- args->hblanke = (blanke & 0x0000ffff);
- args->vblanks = (blanks & 0xffff0000) >> 16;
- args->hblanks = (blanks & 0x0000ffff);
- args->vtotal = ( total & 0xffff0000) >> 16;
- args->htotal = ( total & 0x0000ffff);
-
- args->time[0] = ktime_to_ns(ktime_get());
- args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
- args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
- args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
return 0;
}
-static void
-nv50_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
+int
+nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
{
- nv_mask(event->priv, 0x61002c, (4 << head), (4 << head));
-}
+ const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
+ union {
+ struct nv50_disp_mthd_v0 v0;
+ struct nv50_disp_mthd_v1 v1;
+ } *args = data;
+ struct nv50_disp_priv *priv = (void *)object->engine;
+ struct nvkm_output *outp = NULL;
+ struct nvkm_output *temp;
+ u16 type, mask = 0;
+ int head, ret;
-static void
-nv50_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
-{
- nv_mask(event->priv, 0x61002c, (4 << head), 0);
+ if (mthd != NV50_DISP_MTHD)
+ return -EINVAL;
+
+ nv_ioctl(object, "disp mthd size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(object, "disp mthd vers %d mthd %02x head %d\n",
+ args->v0.version, args->v0.method, args->v0.head);
+ mthd = args->v0.method;
+ head = args->v0.head;
+ } else
+ if (nvif_unpack(args->v1, 1, 1, true)) {
+ nv_ioctl(object, "disp mthd vers %d mthd %02x "
+ "type %04x mask %04x\n",
+ args->v1.version, args->v1.method,
+ args->v1.hasht, args->v1.hashm);
+ mthd = args->v1.method;
+ type = args->v1.hasht;
+ mask = args->v1.hashm;
+ head = ffs((mask >> 8) & 0x0f) - 1;
+ } else
+ return ret;
+
+ if (head < 0 || head >= priv->head.nr)
+ return -ENXIO;
+
+ if (mask) {
+ list_for_each_entry(temp, &priv->base.outp, head) {
+ if ((temp->info.hasht == type) &&
+ (temp->info.hashm & mask) == mask) {
+ outp = temp;
+ break;
+ }
+ }
+ if (outp == NULL)
+ return -ENXIO;
+ }
+
+ switch (mthd) {
+ case NV50_DISP_SCANOUTPOS:
+ return impl->head.scanoutpos(object, priv, data, size, head);
+ default:
+ break;
+ }
+
+ switch (mthd * !!outp) {
+ case NV50_DISP_MTHD_V1_DAC_PWR:
+ return priv->dac.power(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_DAC_LOAD:
+ return priv->dac.sense(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_PWR:
+ return priv->sor.power(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_HDA_ELD:
+ if (!priv->sor.hda_eld)
+ return -ENODEV;
+ return priv->sor.hda_eld(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_HDMI_PWR:
+ if (!priv->sor.hdmi)
+ return -ENODEV;
+ return priv->sor.hdmi(object, priv, data, size, head, outp);
+ case NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT: {
+ union {
+ struct nv50_disp_sor_lvds_script_v0 v0;
+ } *args = data;
+ nv_ioctl(object, "disp sor lvds script size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor lvds script "
+ "vers %d name %04x\n",
+ args->v0.version, args->v0.script);
+ priv->sor.lvdsconf = args->v0.script;
+ return 0;
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_SOR_DP_PWR: {
+ struct nvkm_output_dp *outpdp = (void *)outp;
+ union {
+ struct nv50_disp_sor_dp_pwr_v0 v0;
+ } *args = data;
+ nv_ioctl(object, "disp sor dp pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor dp pwr vers %d state %d\n",
+ args->v0.version, args->v0.state);
+ if (args->v0.state == 0) {
+ nvkm_notify_put(&outpdp->irq);
+ ((struct nvkm_output_dp_impl *)nv_oclass(outp))
+ ->lnk_pwr(outpdp, 0);
+ atomic_set(&outpdp->lt.done, 0);
+ return 0;
+ } else
+ if (args->v0.state != 0) {
+ nvkm_output_dp_train(&outpdp->base, 0, true);
+ return 0;
+ }
+ } else
+ return ret;
+ }
+ break;
+ case NV50_DISP_MTHD_V1_PIOR_PWR:
+ if (!priv->pior.power)
+ return -ENODEV;
+ return priv->pior.power(object, priv, data, size, head, outp);
+ default:
+ break;
+ }
+
+ return -EINVAL;
}
-static int
+int
nv50_disp_base_ctor(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -856,14 +1042,11 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- priv->base.vblank->priv = priv;
- priv->base.vblank->enable = nv50_disp_base_vblank_enable;
- priv->base.vblank->disable = nv50_disp_base_vblank_disable;
return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
&base->ramht);
}
-static void
+void
nv50_disp_base_dtor(struct nouveau_object *object)
{
struct nv50_disp_base *base = (void *)object;
@@ -958,34 +1141,23 @@ nv50_disp_base_ofuncs = {
.dtor = nv50_disp_base_dtor,
.init = nv50_disp_base_init,
.fini = nv50_disp_base_fini,
-};
-
-static struct nouveau_omthds
-nv50_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
+ .mthd = nv50_disp_base_mthd,
+ .ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
nv50_disp_base_oclass[] = {
- { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
+ { NV50_DISP, &nv50_disp_base_ofuncs },
{}
};
static struct nouveau_oclass
nv50_disp_sclass[] = {
- { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
@@ -1005,7 +1177,7 @@ nv50_disp_data_ctor(struct nouveau_object *parent,
int ret = -EBUSY;
/* no context needed for channel objects... */
- if (nv_mclass(parent) != NV_DEVICE_CLASS) {
+ if (nv_mclass(parent) != NV_DEVICE) {
atomic_inc(&parent->refcount);
*pobject = parent;
return 1;
@@ -1040,6 +1212,27 @@ nv50_disp_cclass = {
* Display engine implementation
******************************************************************************/
+static void
+nv50_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+{
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_mask(disp, 0x61002c, (4 << head), 0);
+}
+
+static void
+nv50_disp_vblank_init(struct nvkm_event *event, int type, int head)
+{
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_mask(disp, 0x61002c, (4 << head), (4 << head));
+}
+
+const struct nvkm_event_func
+nv50_disp_vblank_func = {
+ .ctor = nouveau_disp_vblank_ctor,
+ .init = nv50_disp_vblank_init,
+ .fini = nv50_disp_vblank_fini,
+};
+
static const struct nouveau_enum
nv50_disp_intr_error_type[] = {
{ 3, "ILLEGAL_MTHD" },
@@ -1381,7 +1574,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
int TU, VTUi, VTUf, VTUa;
u64 link_data_rate, link_ratio, unk;
u32 best_diff = 64 * symbol;
- u32 link_nr, link_bw, bits, r;
+ u32 link_nr, link_bw, bits;
/* calculate packed data rate for each lane */
if (dpctrl > 0x00030000) link_nr = 4;
@@ -1401,7 +1594,7 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
/* calculate ratio of packed data rate to link symbol rate */
link_ratio = link_data_rate * symbol;
- r = do_div(link_ratio, link_bw);
+ do_div(link_ratio, link_bw);
for (TU = 64; TU >= 32; TU--) {
/* calculate average number of valid symbols in each TU */
@@ -1462,8 +1655,8 @@ nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
/* XXX close to vbios numbers, but not right */
unk = (symbol - link_ratio) * bestTU;
unk *= link_ratio;
- r = do_div(unk, symbol);
- r = do_div(unk, symbol);
+ do_div(unk, symbol);
+ do_div(unk, symbol);
unk += 6;
nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
@@ -1654,13 +1847,13 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
}
if (intr1 & 0x00000004) {
- nouveau_event_trigger(priv->base.vblank, 1, 0);
+ nouveau_disp_vblank(&priv->base, 0);
nv_wr32(priv, 0x610024, 0x00000004);
intr1 &= ~0x00000004;
}
if (intr1 & 0x00000008) {
- nouveau_event_trigger(priv->base.vblank, 1, 1);
+ nouveau_disp_vblank(&priv->base, 1);
nv_wr32(priv, 0x610024, 0x00000008);
intr1 &= ~0x00000008;
}
@@ -1718,9 +1911,11 @@ nv50_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
.mthd.core = &nv50_disp_mast_mthd_chan,
.mthd.base = &nv50_disp_sync_mthd_chan,
.mthd.ovly = &nv50_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 1a886472b6f5..8ab14461f70c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -14,15 +14,10 @@
#include "outp.h"
#include "outpdp.h"
-struct nv50_disp_impl {
- struct nouveau_disp_impl base;
- struct {
- const struct nv50_disp_mthd_chan *core;
- const struct nv50_disp_mthd_chan *base;
- const struct nv50_disp_mthd_chan *ovly;
- int prev;
- } mthd;
-};
+#define NV50_DISP_MTHD_ struct nouveau_object *object, \
+ struct nv50_disp_priv *priv, void *data, u32 size
+#define NV50_DISP_MTHD_V0 NV50_DISP_MTHD_, int head
+#define NV50_DISP_MTHD_V1 NV50_DISP_MTHD_, int head, struct nvkm_output *outp
struct nv50_disp_priv {
struct nouveau_disp base;
@@ -36,44 +31,52 @@ struct nv50_disp_priv {
} head;
struct {
int nr;
- int (*power)(struct nv50_disp_priv *, int dac, u32 data);
- int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
+ int (*power)(NV50_DISP_MTHD_V1);
+ int (*sense)(NV50_DISP_MTHD_V1);
} dac;
struct {
int nr;
- int (*power)(struct nv50_disp_priv *, int sor, u32 data);
- int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
- int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
+ int (*power)(NV50_DISP_MTHD_V1);
+ int (*hda_eld)(NV50_DISP_MTHD_V1);
+ int (*hdmi)(NV50_DISP_MTHD_V1);
u32 lvdsconf;
} sor;
struct {
int nr;
- int (*power)(struct nv50_disp_priv *, int ext, u32 data);
+ int (*power)(NV50_DISP_MTHD_V1);
u8 type[3];
} pior;
};
-#define HEAD_MTHD(n) (n), (n) + 0x03
-
-int nv50_disp_base_scanoutpos(struct nouveau_object *, u32, void *, u32);
+struct nv50_disp_impl {
+ struct nouveau_disp_impl base;
+ struct {
+ const struct nv50_disp_mthd_chan *core;
+ const struct nv50_disp_mthd_chan *base;
+ const struct nv50_disp_mthd_chan *ovly;
+ int prev;
+ } mthd;
+ struct {
+ int (*scanoutpos)(NV50_DISP_MTHD_V0);
+ } head;
+};
-#define DAC_MTHD(n) (n), (n) + 0x03
+int nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
+int nv50_disp_base_mthd(struct nouveau_object *, u32, void *, u32);
-int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
-int nv50_dac_power(struct nv50_disp_priv *, int, u32);
-int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
+int nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
-#define SOR_MTHD(n) (n), (n) + 0x3f
+int nv50_dac_power(NV50_DISP_MTHD_V1);
+int nv50_dac_sense(NV50_DISP_MTHD_V1);
-int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
-int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
+int nva3_hda_eld(NV50_DISP_MTHD_V1);
+int nvd0_hda_eld(NV50_DISP_MTHD_V1);
-int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
-int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
-int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
+int nv84_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int nva3_hdmi_ctrl(NV50_DISP_MTHD_V1);
+int nvd0_hdmi_ctrl(NV50_DISP_MTHD_V1);
-int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
-int nv50_sor_power(struct nv50_disp_priv *, int, u32);
+int nv50_sor_power(NV50_DISP_MTHD_V1);
int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
u32, struct dcb_output *);
@@ -93,10 +96,7 @@ int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
struct dcb_output *);
-#define PIOR_MTHD(n) (n), (n) + 0x03
-
-int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32);
-int nv50_pior_power(struct nv50_disp_priv *, int, u32);
+int nv50_pior_power(NV50_DISP_MTHD_V1);
struct nv50_disp_base {
struct nouveau_parent base;
@@ -104,14 +104,19 @@ struct nv50_disp_base {
u32 chan;
};
+struct nv50_disp_chan_impl {
+ struct nouveau_ofuncs base;
+ int chid;
+ int (*attach)(struct nouveau_object *, struct nouveau_object *, u32);
+ void (*detach)(struct nouveau_object *, int);
+};
+
struct nv50_disp_chan {
struct nouveau_namedb base;
int chid;
};
-int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, int, void **);
-void nv50_disp_chan_destroy(struct nv50_disp_chan *);
+int nv50_disp_chan_map(struct nouveau_object *, u64 *, u32 *);
u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
@@ -120,20 +125,20 @@ void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
#define nv50_disp_chan_fini(a,b) \
nouveau_namedb_fini(&(a)->base, (b))
-int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, u32, int, int, void **);
-void nv50_disp_dmac_dtor(struct nouveau_object *);
-
struct nv50_disp_dmac {
struct nv50_disp_chan base;
struct nouveau_dmaobj *pushdma;
u32 push;
};
+void nv50_disp_dmac_dtor(struct nouveau_object *);
+
struct nv50_disp_pioc {
struct nv50_disp_chan base;
};
+void nv50_disp_pioc_dtor(struct nouveau_object *);
+
struct nv50_disp_mthd_list {
u32 mthd;
u32 addr;
@@ -154,47 +159,67 @@ struct nv50_disp_mthd_chan {
} data[];
};
-extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
+extern struct nv50_disp_chan_impl nv50_disp_mast_ofuncs;
+int nv50_disp_mast_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
-extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
+extern struct nv50_disp_chan_impl nv50_disp_sync_ofuncs;
+int nv50_disp_sync_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
-extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
+extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
+int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
extern const struct nv50_disp_mthd_list nv50_disp_ovly_mthd_base;
-extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
-extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
+extern struct nv50_disp_chan_impl nv50_disp_oimm_ofuncs;
+int nv50_disp_oimm_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
+int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
+int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nv50_disp_base_dtor(struct nouveau_object *);
+extern struct nouveau_omthds nv50_disp_base_omthds[];
extern struct nouveau_oclass nv50_disp_cclass;
void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
const struct nv50_disp_mthd_chan *);
void nv50_disp_intr_supervisor(struct work_struct *);
void nv50_disp_intr(struct nouveau_subdev *);
+extern const struct nvkm_event_func nv50_disp_vblank_func;
extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
-extern struct nouveau_omthds nv84_disp_base_omthds[];
extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
-extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs;
extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
-extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
-extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs;
extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
-extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
-extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
-extern struct nouveau_omthds nvd0_disp_base_omthds[];
+extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs;
extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
extern struct nouveau_oclass nvd0_disp_cclass;
void nvd0_disp_intr_supervisor(struct work_struct *);
void nvd0_disp_intr(struct nouveau_subdev *);
+extern const struct nvkm_event_func nvd0_disp_vblank_func;
extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
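The header rework above replaces per-method prototypes with the NV50_DISP_MTHD_/V0/V1 macros, so every DAC/SOR/PIOR method and the function pointers that store them share a single parameter list. A small hand-rolled sketch of that pattern, with names invented for the example, not taken from the patch:

/* macro-as-parameter-list demo */
#include <stdio.h>

struct ctx;	/* stands in for nv50_disp_priv / nvkm_output */

#define DEMO_MTHD_ARGS struct ctx *ctx, void *data, unsigned size, int head

static int demo_power(DEMO_MTHD_ARGS)
{
	(void)ctx; (void)data;
	printf("power method, head %d, %u bytes of args\n", head, size);
	return 0;
}

struct demo_methods {
	int (*power)(DEMO_MTHD_ARGS);	/* same macro keeps pointer in sync */
};

int main(void)
{
	struct demo_methods m = { .power = demo_power };
	return m.power(NULL, NULL, 0, 0);
}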
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index 1cc62e434683..788ced1b6182 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -204,31 +204,17 @@ nv84_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nv84_disp_sclass[] = {
- { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
-struct nouveau_omthds
-nv84_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
-};
-
static struct nouveau_oclass
nv84_disp_base_oclass[] = {
- { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ { G82_DISP, &nv50_disp_base_ofuncs },
{}
};
@@ -276,9 +262,11 @@ nv84_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
.mthd.core = &nv84_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
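The nvXX_disp_sclass[] and *_base_oclass[] arrays touched above follow a zero-terminated class-table convention: each entry maps a class identifier to its ofuncs, and object creation walks the table. A minimal analogue (handles and struct names invented here, not the real nvif class IDs):

/* zero-terminated class table demo */
#include <stdio.h>

struct demo_ofuncs { const char *label; };

struct demo_oclass {
	unsigned handle;			/* 0 terminates the table */
	const struct demo_ofuncs *ofuncs;
};

static const struct demo_ofuncs core_ofuncs = { "core channel" };
static const struct demo_ofuncs curs_ofuncs = { "cursor channel" };

static const struct demo_oclass demo_sclass[] = {
	{ 0x0001, &core_ofuncs },
	{ 0x0002, &curs_ofuncs },
	{}
};

static const struct demo_ofuncs *demo_lookup(unsigned handle)
{
	const struct demo_oclass *c;

	for (c = demo_sclass; c->handle; c++)
		if (c->handle == handle)
			return c->ofuncs;
	return NULL;
}

int main(void)
{
	const struct demo_ofuncs *f = demo_lookup(0x0002);
	printf("%s\n", f ? f->label : "unknown class");
	return 0;
}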
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index 4f718a9f5aef..fa79de906eae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -63,32 +63,17 @@ nv94_disp_mast_mthd_chan = {
static struct nouveau_oclass
nv94_disp_sclass[] = {
- { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
-static struct nouveau_omthds
-nv94_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
-};
-
static struct nouveau_oclass
nv94_disp_base_oclass[] = {
- { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
+ { GT206_DISP, &nv50_disp_base_ofuncs },
{}
};
@@ -143,9 +128,11 @@ nv94_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv94_disp_outp_sclass,
.mthd.core = &nv94_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 6237a9a36f70..7af15f5d48dc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -80,17 +80,17 @@ nva0_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nva0_disp_sclass[] = {
- { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
static struct nouveau_oclass
nva0_disp_base_oclass[] = {
- { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
+ { GT200_DISP, &nv50_disp_base_ofuncs },
{}
};
@@ -138,9 +138,11 @@ nva0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv50_disp_outp_sclass,
.mthd.core = &nv84_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nva0_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 019124d4782b..6bd39448f8da 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -35,33 +35,17 @@
static struct nouveau_oclass
nva3_disp_sclass[] = {
- { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
- { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
- { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
- { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
- { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
+ { GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
+ { GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+ { GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
+ { GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
+ { GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
{}
};
-static struct nouveau_omthds
-nva3_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nv50_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
- { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
-};
-
static struct nouveau_oclass
nva3_disp_base_oclass[] = {
- { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
+ { GT214_DISP, &nv50_disp_base_ofuncs },
{}
};
@@ -110,9 +94,11 @@ nva3_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nv50_disp_vblank_func,
.base.outp = nv94_disp_outp_sclass,
.mthd.core = &nv94_disp_mast_mthd_chan,
.mthd.base = &nv84_disp_sync_mthd_chan,
.mthd.ovly = &nv84_disp_ovly_mthd_chan,
.mthd.prev = 0x000004,
+ .head.scanoutpos = nv50_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index fa30d8196f35..a4bb3c774ee1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -23,9 +23,11 @@
*/
#include <core/object.h>
+#include <core/client.h>
#include <core/parent.h>
#include <core/handle.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <engine/disp.h>
@@ -265,30 +267,6 @@ nvd0_disp_mast_mthd_chan = {
};
static int
-nvd0_disp_mast_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_mast_class *args = data;
- struct nv50_disp_dmac *mast;
- int ret;
-
- if (size < sizeof(*args))
- return -EINVAL;
-
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 0, sizeof(*mast), (void **)&mast);
- *pobject = nv_object(mast);
- if (ret)
- return ret;
-
- nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
- nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
- return 0;
-}
-
-static int
nvd0_disp_mast_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
@@ -342,14 +320,18 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
return nv50_disp_chan_fini(&mast->base, suspend);
}
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_mast_ofuncs = {
- .ctor = nvd0_disp_mast_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nvd0_disp_mast_init,
- .fini = nvd0_disp_mast_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_mast_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nvd0_disp_mast_init,
+ .base.fini = nvd0_disp_mast_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 0,
+ .attach = nvd0_disp_dmac_object_attach,
+ .detach = nvd0_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -431,40 +413,18 @@ nvd0_disp_sync_mthd_chan = {
}
};
-static int
-nvd0_disp_sync_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_sync_class *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_dmac *dmac;
- int ret;
-
- if (size < sizeof(*args) || args->head >= priv->head.nr)
- return -EINVAL;
-
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 1 + args->head, sizeof(*dmac),
- (void **)&dmac);
- *pobject = nv_object(dmac);
- if (ret)
- return ret;
-
- nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
- nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
- return 0;
-}
-
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_sync_ofuncs = {
- .ctor = nvd0_disp_sync_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nvd0_disp_dmac_init,
- .fini = nvd0_disp_dmac_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_sync_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nvd0_disp_dmac_init,
+ .base.fini = nvd0_disp_dmac_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 1,
+ .attach = nvd0_disp_dmac_object_attach,
+ .detach = nvd0_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -533,40 +493,18 @@ nvd0_disp_ovly_mthd_chan = {
}
};
-static int
-nvd0_disp_ovly_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_ovly_class *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_dmac *dmac;
- int ret;
-
- if (size < sizeof(*args) || args->head >= priv->head.nr)
- return -EINVAL;
-
- ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
- 5 + args->head, sizeof(*dmac),
- (void **)&dmac);
- *pobject = nv_object(dmac);
- if (ret)
- return ret;
-
- nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
- nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
- return 0;
-}
-
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_ovly_ofuncs = {
- .ctor = nvd0_disp_ovly_ctor,
- .dtor = nv50_disp_dmac_dtor,
- .init = nvd0_disp_dmac_init,
- .fini = nvd0_disp_dmac_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_ovly_ctor,
+ .base.dtor = nv50_disp_dmac_dtor,
+ .base.init = nvd0_disp_dmac_init,
+ .base.fini = nvd0_disp_dmac_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 5,
+ .attach = nvd0_disp_dmac_object_attach,
+ .detach = nvd0_disp_dmac_object_detach,
};
/*******************************************************************************
@@ -574,23 +512,6 @@ nvd0_disp_ovly_ofuncs = {
******************************************************************************/
static int
-nvd0_disp_pioc_create_(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int chid,
- int length, void **pobject)
-{
- return nv50_disp_chan_create_(parent, engine, oclass, chid,
- length, pobject);
-}
-
-static void
-nvd0_disp_pioc_dtor(struct nouveau_object *object)
-{
- struct nv50_disp_pioc *pioc = (void *)object;
- nv50_disp_chan_destroy(&pioc->base);
-}
-
-static int
nvd0_disp_pioc_init(struct nouveau_object *object)
{
struct nv50_disp_priv *priv = (void *)object->engine;
@@ -643,152 +564,68 @@ nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
* EVO immediate overlay channel objects
******************************************************************************/
-static int
-nvd0_disp_oimm_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_oimm_class *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_pioc *pioc;
- int ret;
-
- if (size < sizeof(*args) || args->head >= priv->head.nr)
- return -EINVAL;
-
- ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
- sizeof(*pioc), (void **)&pioc);
- *pobject = nv_object(pioc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_oimm_ofuncs = {
- .ctor = nvd0_disp_oimm_ctor,
- .dtor = nvd0_disp_pioc_dtor,
- .init = nvd0_disp_pioc_init,
- .fini = nvd0_disp_pioc_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_oimm_ctor,
+ .base.dtor = nv50_disp_pioc_dtor,
+ .base.init = nvd0_disp_pioc_init,
+ .base.fini = nvd0_disp_pioc_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 9,
};
/*******************************************************************************
* EVO cursor channel objects
******************************************************************************/
-static int
-nvd0_disp_curs_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_display_curs_class *args = data;
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_pioc *pioc;
- int ret;
-
- if (size < sizeof(*args) || args->head >= priv->head.nr)
- return -EINVAL;
-
- ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
- sizeof(*pioc), (void **)&pioc);
- *pobject = nv_object(pioc);
- if (ret)
- return ret;
-
- return 0;
-}
-
-struct nouveau_ofuncs
+struct nv50_disp_chan_impl
nvd0_disp_curs_ofuncs = {
- .ctor = nvd0_disp_curs_ctor,
- .dtor = nvd0_disp_pioc_dtor,
- .init = nvd0_disp_pioc_init,
- .fini = nvd0_disp_pioc_fini,
- .rd32 = nv50_disp_chan_rd32,
- .wr32 = nv50_disp_chan_wr32,
+ .base.ctor = nv50_disp_curs_ctor,
+ .base.dtor = nv50_disp_pioc_dtor,
+ .base.init = nvd0_disp_pioc_init,
+ .base.fini = nvd0_disp_pioc_fini,
+ .base.map = nv50_disp_chan_map,
+ .base.rd32 = nv50_disp_chan_rd32,
+ .base.wr32 = nv50_disp_chan_wr32,
+ .chid = 13,
};
/*******************************************************************************
* Base display object
******************************************************************************/
-static int
-nvd0_disp_base_scanoutpos(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- struct nv04_display_scanoutpos *args = data;
- const int head = (mthd & NV50_DISP_MTHD_HEAD);
- u32 blanke, blanks, total;
-
- if (size < sizeof(*args) || head >= priv->head.nr)
- return -EINVAL;
-
- total = nv_rd32(priv, 0x640414 + (head * 0x300));
- blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
- blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
-
- args->vblanke = (blanke & 0xffff0000) >> 16;
- args->hblanke = (blanke & 0x0000ffff);
- args->vblanks = (blanks & 0xffff0000) >> 16;
- args->hblanks = (blanks & 0x0000ffff);
- args->vtotal = ( total & 0xffff0000) >> 16;
- args->htotal = ( total & 0x0000ffff);
-
- args->time[0] = ktime_to_ns(ktime_get());
- args->vline = nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
- args->time[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
- args->hline = nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
- return 0;
-}
-
-static void
-nvd0_disp_base_vblank_enable(struct nouveau_event *event, int type, int head)
-{
- nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
-}
-
-static void
-nvd0_disp_base_vblank_disable(struct nouveau_event *event, int type, int head)
+int
+nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
{
- nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
-}
-
-static int
-nvd0_disp_base_ctor(struct nouveau_object *parent,
- struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_disp_priv *priv = (void *)engine;
- struct nv50_disp_base *base;
+ const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
+ const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
+ const u32 blanks = nv_rd32(priv, 0x640420 + (head * 0x300));
+ union {
+ struct nv04_disp_scanoutpos_v0 v0;
+ } *args = data;
int ret;
- ret = nouveau_parent_create(parent, engine, oclass, 0,
- priv->sclass, 0, &base);
- *pobject = nv_object(base);
- if (ret)
+ nv_ioctl(object, "disp scanoutpos size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp scanoutpos vers %d\n", args->v0.version);
+ args->v0.vblanke = (blanke & 0xffff0000) >> 16;
+ args->v0.hblanke = (blanke & 0x0000ffff);
+ args->v0.vblanks = (blanks & 0xffff0000) >> 16;
+ args->v0.hblanks = (blanks & 0x0000ffff);
+ args->v0.vtotal = ( total & 0xffff0000) >> 16;
+ args->v0.htotal = ( total & 0x0000ffff);
+ args->v0.time[0] = ktime_to_ns(ktime_get());
+ args->v0.vline = /* vline read locks hline */
+ nv_rd32(priv, 0x616340 + (head * 0x800)) & 0xffff;
+ args->v0.time[1] = ktime_to_ns(ktime_get());
+ args->v0.hline =
+ nv_rd32(priv, 0x616344 + (head * 0x800)) & 0xffff;
+ } else
return ret;
- priv->base.vblank->priv = priv;
- priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
- priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
-
- return nouveau_ramht_new(nv_object(base), nv_object(base), 0x1000, 0,
- &base->ramht);
-}
-
-static void
-nvd0_disp_base_dtor(struct nouveau_object *object)
-{
- struct nv50_disp_base *base = (void *)object;
- nouveau_ramht_ref(NULL, &base->ramht);
- nouveau_parent_destroy(&base->base);
+ return 0;
}
static int
@@ -874,41 +711,27 @@ nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
struct nouveau_ofuncs
nvd0_disp_base_ofuncs = {
- .ctor = nvd0_disp_base_ctor,
- .dtor = nvd0_disp_base_dtor,
+ .ctor = nv50_disp_base_ctor,
+ .dtor = nv50_disp_base_dtor,
.init = nvd0_disp_base_init,
.fini = nvd0_disp_base_fini,
-};
-
-struct nouveau_omthds
-nvd0_disp_base_omthds[] = {
- { HEAD_MTHD(NV50_DISP_SCANOUTPOS) , nvd0_disp_base_scanoutpos },
- { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
- { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
- { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
- { SOR_MTHD(NV94_DISP_SOR_DP_PWR) , nv50_sor_mthd },
- { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
- { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
- { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
- {},
+ .mthd = nv50_disp_base_mthd,
+ .ntfy = nouveau_disp_ntfy,
};
static struct nouveau_oclass
nvd0_disp_base_oclass[] = {
- { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ { GF110_DISP, &nvd0_disp_base_ofuncs },
{}
};
static struct nouveau_oclass
nvd0_disp_sclass[] = {
- { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
- { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
- { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
- { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
- { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ { GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
+ { GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
{}
};
@@ -916,6 +739,27 @@ nvd0_disp_sclass[] = {
* Display engine implementation
******************************************************************************/
+static void
+nvd0_disp_vblank_init(struct nvkm_event *event, int type, int head)
+{
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
+}
+
+static void
+nvd0_disp_vblank_fini(struct nvkm_event *event, int type, int head)
+{
+ struct nouveau_disp *disp = container_of(event, typeof(*disp), vblank);
+ nv_mask(disp, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
+}
+
+const struct nvkm_event_func
+nvd0_disp_vblank_func = {
+ .ctor = nouveau_disp_vblank_ctor,
+ .init = nvd0_disp_vblank_init,
+ .fini = nvd0_disp_vblank_fini,
+};
+
static struct nvkm_output *
exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
u32 *data, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
@@ -1343,7 +1187,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
if (mask & intr) {
u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
if (stat & 0x00000001)
- nouveau_event_trigger(priv->base.vblank, 1, i);
+ nouveau_disp_vblank(&priv->base, i);
nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
nv_rd32(priv, 0x6100c0 + (i * 0x800));
}
@@ -1396,9 +1240,11 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nvd0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nvd0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_base_scanoutpos,
}.base.base;
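The scanoutpos and channel-method conversions above all adopt the versioned-argument shape: a union of *_v0 structs overlaid on the raw buffer, unpacked before the fields are read or filled in. nvif_unpack() is the real macro in the nouveau tree; the demo_unpack_v0() helper below is invented purely to show the shape of the pattern under that assumption.

/* versioned-args unpack demo */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct demo_scanoutpos_v0 {
	uint8_t  version;
	uint8_t  pad[7];
	uint16_t vline;
	uint16_t hline;
};

/* Accept the buffer only if it holds a v0 struct of the expected version. */
static int demo_unpack_v0(const void *data, size_t size,
			  struct demo_scanoutpos_v0 *out)
{
	if (size < sizeof(*out))
		return 0;
	memcpy(out, data, sizeof(*out));
	return out->version == 0;
}

int main(void)
{
	struct demo_scanoutpos_v0 req = { .version = 0 };
	union { struct demo_scanoutpos_v0 v0; } args;

	if (demo_unpack_v0(&req, sizeof(req), &args.v0)) {
		args.v0.vline = 42;	/* fill reply fields in place */
		printf("vers %d vline %u\n", args.v0.version,
		       (unsigned)args.v0.vline);
		return 0;
	}
	return -1;	/* unknown version or short buffer */
}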
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 11328e3f5df1..47fef1e398c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -200,17 +200,17 @@ nve0_disp_ovly_mthd_chan = {
static struct nouveau_oclass
nve0_disp_sclass[] = {
- { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
- { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
- { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
- { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
- { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ { GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
+ { GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
{}
};
static struct nouveau_oclass
nve0_disp_base_oclass[] = {
- { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ { GK104_DISP, &nvd0_disp_base_ofuncs },
{}
};
@@ -258,9 +258,11 @@ nve0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nve0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 104388081d73..04bda4ac4ed3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -25,7 +25,7 @@
#include <engine/software.h>
#include <engine/disp.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include "nv50.h"
@@ -35,17 +35,17 @@
static struct nouveau_oclass
nvf0_disp_sclass[] = {
- { NVF0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
- { NVF0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
- { NVF0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
- { NVF0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
- { NVF0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
+ { GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
+ { GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+ { GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+ { GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+ { GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
{}
};
static struct nouveau_oclass
nvf0_disp_base_oclass[] = {
- { NVF0_DISP_CLASS, &nvd0_disp_base_ofuncs, nvd0_disp_base_omthds },
+ { GK110_DISP, &nvd0_disp_base_ofuncs },
{}
};
@@ -93,9 +93,11 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) {
.init = _nouveau_disp_init,
.fini = _nouveau_disp_fini,
},
+ .base.vblank = &nvd0_disp_vblank_func,
.base.outp = nvd0_disp_outp_sclass,
.mthd.core = &nve0_disp_mast_mthd_chan,
.mthd.base = &nvd0_disp_sync_mthd_chan,
.mthd.ovly = &nve0_disp_ovly_mthd_chan,
.mthd.prev = -0x020000,
+ .head.scanoutpos = nvd0_disp_base_scanoutpos,
}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
index ad9ba7ccec7f..a5ff00a9cedc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
@@ -78,6 +78,7 @@ nvkm_output_create_(struct nouveau_object *parent,
outp->info = *dcbE;
outp->index = index;
+ outp->or = ffs(outp->info.or) - 1;
DBG("type %02x loc %d or %d link %d con %x edid %x bus %d head %x\n",
dcbE->type, dcbE->location, dcbE->or, dcbE->type >= 2 ?
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
index bc76fbf85710..187f435ad0e2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.h
@@ -9,6 +9,7 @@ struct nvkm_output {
struct dcb_output info;
int index;
+ int or;
struct nouveau_i2c_port *port;
struct nouveau_i2c_port *edid;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
index eb2d7789555d..6f6e2a898270 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -22,6 +22,9 @@
* Authors: Ben Skeggs
*/
+#include <core/os.h>
+#include <nvif/event.h>
+
#include <subdev/i2c.h>
#include "outpdp.h"
@@ -86,7 +89,7 @@ done:
atomic_set(&outp->lt.done, 0);
schedule_work(&outp->lt.work);
} else {
- nouveau_event_get(outp->irq);
+ nvkm_notify_get(&outp->irq);
}
if (wait) {
@@ -133,46 +136,59 @@ nvkm_output_dp_detect(struct nvkm_output_dp *outp)
}
}
-static void
-nvkm_output_dp_service_work(struct work_struct *work)
+static int
+nvkm_output_dp_hpd(struct nvkm_notify *notify)
{
- struct nvkm_output_dp *outp = container_of(work, typeof(*outp), work);
- struct nouveau_disp *disp = nouveau_disp(outp);
- int type = atomic_xchg(&outp->pending, 0);
- u32 send = 0;
-
- if (type & (NVKM_I2C_PLUG | NVKM_I2C_UNPLUG)) {
- nvkm_output_dp_detect(outp);
- if (type & NVKM_I2C_UNPLUG)
- send |= NVKM_HPD_UNPLUG;
- if (type & NVKM_I2C_PLUG)
- send |= NVKM_HPD_PLUG;
- nouveau_event_get(outp->base.conn->hpd.event);
- }
-
- if (type & NVKM_I2C_IRQ) {
- nvkm_output_dp_train(&outp->base, 0, true);
- send |= NVKM_HPD_IRQ;
+ struct nvkm_connector *conn = container_of(notify, typeof(*conn), hpd);
+ struct nvkm_output_dp *outp;
+ struct nouveau_disp *disp = nouveau_disp(conn);
+ const struct nvkm_i2c_ntfy_rep *line = notify->data;
+ struct nvif_notify_conn_rep_v0 rep = {};
+
+ list_for_each_entry(outp, &disp->outp, base.head) {
+ if (outp->base.conn == conn &&
+ outp->info.type == DCB_OUTPUT_DP) {
+ DBG("HPD: %d\n", line->mask);
+ nvkm_output_dp_detect(outp);
+
+ if (line->mask & NVKM_I2C_UNPLUG)
+ rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
+ if (line->mask & NVKM_I2C_PLUG)
+ rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;
+
+ nvkm_event_send(&disp->hpd, rep.mask, conn->index,
+ &rep, sizeof(rep));
+ return NVKM_NOTIFY_KEEP;
+ }
}
- nouveau_event_trigger(disp->hpd, send, outp->base.info.connector);
+ WARN_ON(1);
+ return NVKM_NOTIFY_DROP;
}
static int
-nvkm_output_dp_service(void *data, u32 type, int index)
+nvkm_output_dp_irq(struct nvkm_notify *notify)
{
- struct nvkm_output_dp *outp = data;
- DBG("HPD: %d\n", type);
- atomic_or(type, &outp->pending);
- schedule_work(&outp->work);
- return NVKM_EVENT_DROP;
+ struct nvkm_output_dp *outp = container_of(notify, typeof(*outp), irq);
+ struct nouveau_disp *disp = nouveau_disp(outp);
+ const struct nvkm_i2c_ntfy_rep *line = notify->data;
+ struct nvif_notify_conn_rep_v0 rep = {
+ .mask = NVIF_NOTIFY_CONN_V0_IRQ,
+ };
+ int index = outp->base.info.connector;
+
+ DBG("IRQ: %d\n", line->mask);
+ nvkm_output_dp_train(&outp->base, 0, true);
+
+ nvkm_event_send(&disp->hpd, rep.mask, index, &rep, sizeof(rep));
+ return NVKM_NOTIFY_DROP;
}
int
_nvkm_output_dp_fini(struct nouveau_object *object, bool suspend)
{
struct nvkm_output_dp *outp = (void *)object;
- nouveau_event_put(outp->irq);
+ nvkm_notify_put(&outp->irq);
nvkm_output_dp_enable(outp, false);
return nvkm_output_fini(&outp->base, suspend);
}
@@ -189,7 +205,7 @@ void
_nvkm_output_dp_dtor(struct nouveau_object *object)
{
struct nvkm_output_dp *outp = (void *)object;
- nouveau_event_ref(NULL, &outp->irq);
+ nvkm_notify_fini(&outp->irq);
nvkm_output_destroy(&outp->base);
}
@@ -213,7 +229,7 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
if (ret)
return ret;
- nouveau_event_ref(NULL, &outp->base.conn->hpd.event);
+ nvkm_notify_fini(&outp->base.conn->hpd);
/* access to the aux channel is not optional... */
if (!outp->base.edid) {
@@ -238,20 +254,28 @@ nvkm_output_dp_create_(struct nouveau_object *parent,
atomic_set(&outp->lt.done, 0);
/* link maintenance */
- ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_IRQ, outp->base.edid->index,
- nvkm_output_dp_service, outp, &outp->irq);
+ ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_irq, true,
+ &(struct nvkm_i2c_ntfy_req) {
+ .mask = NVKM_I2C_IRQ,
+ .port = outp->base.edid->index,
+ },
+ sizeof(struct nvkm_i2c_ntfy_req),
+ sizeof(struct nvkm_i2c_ntfy_rep),
+ &outp->irq);
if (ret) {
ERR("error monitoring aux irq event: %d\n", ret);
return ret;
}
- INIT_WORK(&outp->work, nvkm_output_dp_service_work);
-
/* hotplug detect, replaces gpio-based mechanism with aux events */
- ret = nouveau_event_new(i2c->ntfy, NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
- outp->base.edid->index,
- nvkm_output_dp_service, outp,
- &outp->base.conn->hpd.event);
+ ret = nvkm_notify_init(&i2c->event, nvkm_output_dp_hpd, true,
+ &(struct nvkm_i2c_ntfy_req) {
+ .mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG,
+ .port = outp->base.edid->index,
+ },
+ sizeof(struct nvkm_i2c_ntfy_req),
+ sizeof(struct nvkm_i2c_ntfy_rep),
+ &outp->base.conn->hpd);
if (ret) {
ERR("error monitoring aux hpd events: %d\n", ret);
return ret;
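The outpdp.c changes above move from the old event/work-queue plumbing to nvkm_notify handlers whose return value decides whether the notifier stays armed (NVKM_NOTIFY_KEEP) or is dropped until explicitly re-enabled (NVKM_NOTIFY_DROP). A toy model of that keep/drop convention, with every name below invented for illustration:

/* keep/drop notifier demo */
#include <stdio.h>

enum { DEMO_NOTIFY_DROP = 0, DEMO_NOTIFY_KEEP = 1 };

struct demo_notify {
	int (*func)(struct demo_notify *);
	int armed;
};

/* Deliver an event; disarm unless the handler asks to stay registered. */
static void demo_notify_send(struct demo_notify *n)
{
	if (n->armed)
		n->armed = (n->func(n) == DEMO_NOTIFY_KEEP);
}

static int demo_hpd(struct demo_notify *n)
{
	(void)n;
	printf("hotplug event\n");
	return DEMO_NOTIFY_KEEP;	/* stay armed, like the HPD handler */
}

int main(void)
{
	struct demo_notify n = { .func = demo_hpd, .armed = 1 };

	demo_notify_send(&n);
	demo_notify_send(&n);	/* still delivered: handler returned KEEP */
	return 0;
}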
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
index ff33ba12cb67..1fac367cc867 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.h
@@ -12,10 +12,7 @@ struct nvkm_output_dp {
struct nvbios_dpout info;
u8 version;
- struct nouveau_eventh *irq;
- struct nouveau_eventh *hpd;
- struct work_struct work;
- atomic_t pending;
+ struct nvkm_notify irq;
bool present;
u8 dpcd[16];
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
index fe0f256f11bf..d00f89a468a7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -143,38 +144,29 @@ nv50_pior_dp_impl = {
*****************************************************************************/
int
-nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data)
+nv50_pior_power(NV50_DISP_MTHD_V1)
{
- const u32 stat = data & NV50_DISP_PIOR_PWR_STATE;
- const u32 soff = (or * 0x800);
+ const u32 soff = outp->or * 0x800;
+ union {
+ struct nv50_disp_pior_pwr_v0 v0;
+ } *args = data;
+ u32 ctrl, type;
+ int ret;
+
+ nv_ioctl(object, "disp pior pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp pior pwr vers %d state %d type %x\n",
+ args->v0.version, args->v0.state, args->v0.type);
+ if (args->v0.type > 0x0f)
+ return -EINVAL;
+ ctrl = !!args->v0.state;
+ type = args->v0.type;
+ } else
+ return ret;
+
nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
- nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat);
+ nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | ctrl);
nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
+ priv->pior.type[outp->or] = type;
return 0;
}
-
-int
-nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12;
- const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR);
- u32 *data = args;
- int ret;
-
- if (size < sizeof(u32))
- return -EINVAL;
-
- mthd &= ~NV50_DISP_PIOR_MTHD_TYPE;
- mthd &= ~NV50_DISP_PIOR_MTHD_OR;
- switch (mthd) {
- case NV50_DISP_PIOR_PWR:
- ret = priv->pior.power(priv, or, data[0]);
- priv->pior.type[or] = type;
- break;
- default:
- return -EINVAL;
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
index 26e9a42569c7..dbd43ae9df81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/priv.h
@@ -11,6 +11,7 @@ struct nouveau_disp_impl {
struct nouveau_oclass base;
struct nouveau_oclass **outp;
struct nouveau_oclass **conn;
+ const struct nvkm_event_func *vblank;
};
#define nouveau_disp_create(p,e,c,h,i,x,d) \
@@ -39,4 +40,8 @@ int _nouveau_disp_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass *nvkm_output_oclass;
extern struct nouveau_oclass *nvkm_connector_oclass;
+int nouveau_disp_vblank_ctor(void *data, u32 size, struct nvkm_notify *);
+void nouveau_disp_vblank(struct nouveau_disp *, int head);
+int nouveau_disp_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index 7a1ebdfa9e1b..ddf1760c4400 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
@@ -32,77 +33,26 @@
#include "nv50.h"
int
-nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
+nv50_sor_power(NV50_DISP_MTHD_V1)
{
- const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
- const u32 soff = (or * 0x800);
+ union {
+ struct nv50_disp_sor_pwr_v0 v0;
+ } *args = data;
+ const u32 soff = outp->or * 0x800;
+ u32 stat;
+ int ret;
+
+ nv_ioctl(object, "disp sor pwr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "disp sor pwr vers %d state %d\n",
+ args->v0.version, args->v0.state);
+ stat = !!args->v0.state;
+ } else
+ return ret;
+
nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
return 0;
}
-
-int
-nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
-{
- struct nv50_disp_priv *priv = (void *)object->engine;
- const u8 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
- const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
- const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
- const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
- const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
- struct nvkm_output *outp = NULL, *temp;
- u32 data;
- int ret = -EINVAL;
-
- if (size < sizeof(u32))
- return -EINVAL;
- data = *(u32 *)args;
-
- list_for_each_entry(temp, &priv->base.outp, head) {
- if ((temp->info.hasht & 0xff) == type &&
- (temp->info.hashm & mask) == mask) {
- outp = temp;
- break;
- }
- }
-
- switch (mthd & ~0x3f) {
- case NV50_DISP_SOR_PWR:
- ret = priv->sor.power(priv, or, data);
- break;
- case NVA3_DISP_SOR_HDA_ELD:
- ret = priv->sor.hda_eld(priv, or, args, size);
- break;
- case NV84_DISP_SOR_HDMI_PWR:
- ret = priv->sor.hdmi(priv, head, or, data);
- break;
- case NV50_DISP_SOR_LVDS_SCRIPT:
- priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
- ret = 0;
- break;
- case NV94_DISP_SOR_DP_PWR:
- if (outp) {
- struct nvkm_output_dp *outpdp = (void *)outp;
- switch (data) {
- case NV94_DISP_SOR_DP_PWR_STATE_OFF:
- nouveau_event_put(outpdp->irq);
- ((struct nvkm_output_dp_impl *)nv_oclass(outp))
- ->lnk_pwr(outpdp, 0);
- atomic_set(&outpdp->lt.done, 0);
- break;
- case NV94_DISP_SOR_DP_PWR_STATE_ON:
- nvkm_output_dp_train(&outpdp->base, 0, true);
- break;
- default:
- return -EINVAL;
- }
- }
- break;
- default:
- BUG_ON(1);
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index 05487cda84a8..39f85d627336 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 97f0e9cd3d40..7b7bbc3e459e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index 5103e88d1877..e1500f77a56a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -23,98 +23,143 @@
*/
#include <core/object.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
-#include <engine/dmaobj.h>
+#include <subdev/instmem.h>
+
+#include "priv.h"
static int
-nouveau_dmaobj_ctor(struct nouveau_object *parent,
+nvkm_dmaobj_bind(struct nouveau_dmaobj *dmaobj, struct nouveau_object *parent,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ const struct nvkm_dmaeng_impl *impl = (void *)
+ nv_oclass(nv_object(dmaobj)->engine);
+ int ret = 0;
+
+ if (nv_object(dmaobj) == parent) { /* ctor bind */
+ if (nv_mclass(parent->parent) == NV_DEVICE) {
+ /* delayed, or no, binding */
+ return 0;
+ }
+ ret = impl->bind(dmaobj, parent, pgpuobj);
+ if (ret == 0)
+ nouveau_object_ref(NULL, &parent);
+ return ret;
+ }
+
+ return impl->bind(dmaobj, parent, pgpuobj);
+}
+
+int
+nvkm_dmaobj_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
+ struct nouveau_oclass *oclass, void **pdata, u32 *psize,
+ int length, void **pobject)
{
- struct nouveau_dmaeng *dmaeng = (void *)engine;
+ union {
+ struct nv_dma_v0 v0;
+ } *args = *pdata;
+ struct nouveau_instmem *instmem = nouveau_instmem(parent);
+ struct nouveau_client *client = nouveau_client(parent);
+ struct nouveau_device *device = nv_device(parent);
+ struct nouveau_fb *pfb = nouveau_fb(parent);
struct nouveau_dmaobj *dmaobj;
- struct nouveau_gpuobj *gpuobj;
- struct nv_dma_class *args = data;
+ void *data = *pdata;
+ u32 size = *psize;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
-
- ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
- *pobject = nv_object(dmaobj);
+ ret = nouveau_object_create_(parent, engine, oclass, 0, length, pobject);
+ dmaobj = *pobject;
if (ret)
return ret;
- switch (args->flags & NV_DMA_TARGET_MASK) {
- case NV_DMA_TARGET_VM:
+ nv_ioctl(parent, "create dma size %d\n", *psize);
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ nv_ioctl(parent, "create dma vers %d target %d access %d "
+ "start %016llx limit %016llx\n",
+ args->v0.version, args->v0.target, args->v0.access,
+ args->v0.start, args->v0.limit);
+ dmaobj->target = args->v0.target;
+ dmaobj->access = args->v0.access;
+ dmaobj->start = args->v0.start;
+ dmaobj->limit = args->v0.limit;
+ } else
+ return ret;
+
+ *pdata = data;
+ *psize = size;
+
+ if (dmaobj->start > dmaobj->limit)
+ return -EINVAL;
+
+ switch (dmaobj->target) {
+ case NV_DMA_V0_TARGET_VM:
dmaobj->target = NV_MEM_TARGET_VM;
break;
- case NV_DMA_TARGET_VRAM:
+ case NV_DMA_V0_TARGET_VRAM:
+ if (!client->super) {
+ if (dmaobj->limit >= pfb->ram->size - instmem->reserved)
+ return -EACCES;
+ if (device->card_type >= NV_50)
+ return -EACCES;
+ }
dmaobj->target = NV_MEM_TARGET_VRAM;
break;
- case NV_DMA_TARGET_PCI:
+ case NV_DMA_V0_TARGET_PCI:
+ if (!client->super)
+ return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI;
break;
- case NV_DMA_TARGET_PCI_US:
- case NV_DMA_TARGET_AGP:
+ case NV_DMA_V0_TARGET_PCI_US:
+ case NV_DMA_V0_TARGET_AGP:
+ if (!client->super)
+ return -EACCES;
dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
break;
default:
return -EINVAL;
}
- switch (args->flags & NV_DMA_ACCESS_MASK) {
- case NV_DMA_ACCESS_VM:
+ switch (dmaobj->access) {
+ case NV_DMA_V0_ACCESS_VM:
dmaobj->access = NV_MEM_ACCESS_VM;
break;
- case NV_DMA_ACCESS_RD:
+ case NV_DMA_V0_ACCESS_RD:
dmaobj->access = NV_MEM_ACCESS_RO;
break;
- case NV_DMA_ACCESS_WR:
+ case NV_DMA_V0_ACCESS_WR:
dmaobj->access = NV_MEM_ACCESS_WO;
break;
- case NV_DMA_ACCESS_RDWR:
+ case NV_DMA_V0_ACCESS_RDWR:
dmaobj->access = NV_MEM_ACCESS_RW;
break;
default:
return -EINVAL;
}
- dmaobj->start = args->start;
- dmaobj->limit = args->limit;
- dmaobj->conf0 = args->conf0;
-
- switch (nv_mclass(parent)) {
- case NV_DEVICE_CLASS:
- /* delayed, or no, binding */
- break;
- default:
- ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
- if (ret == 0) {
- nouveau_object_ref(NULL, pobject);
- *pobject = nv_object(gpuobj);
- }
- break;
- }
-
return ret;
}
-static struct nouveau_ofuncs
-nouveau_dmaobj_ofuncs = {
- .ctor = nouveau_dmaobj_ctor,
- .dtor = nouveau_object_destroy,
- .init = nouveau_object_init,
- .fini = nouveau_object_fini,
-};
-
-struct nouveau_oclass
-nouveau_dmaobj_sclass[] = {
- { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
- { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
- { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
- {}
-};
+int
+_nvkm_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ const struct nvkm_dmaeng_impl *impl = (void *)oclass;
+ struct nouveau_dmaeng *dmaeng;
+ int ret;
+
+ ret = nouveau_engine_create(parent, engine, oclass, true, "DMAOBJ",
+ "dmaobj", &dmaeng);
+ *pobject = nv_object(dmaeng);
+ if (ret)
+ return ret;
+
+ nv_engine(dmaeng)->sclass = impl->sclass;
+ dmaeng->bind = nvkm_dmaobj_bind;
+ return 0;
+}
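The new nvkm_dmaobj_create_() above validates the user-visible NV_DMA_V0_* target/access values and maps them onto internal NV_MEM_* constants, rejecting anything unrecognised. A compact sketch of that translate-and-validate step; the enum and function names here are invented for the example:

/* ABI-to-internal enum translation demo */
#include <errno.h>
#include <stdio.h>

enum abi_target { ABI_TARGET_VM, ABI_TARGET_VRAM, ABI_TARGET_PCI };
enum mem_target { MEM_TARGET_VM, MEM_TARGET_VRAM, MEM_TARGET_PCI };

/* Map an ABI value onto the internal constant, or fail with -EINVAL. */
static int map_target(enum abi_target in, enum mem_target *out)
{
	switch (in) {
	case ABI_TARGET_VM:   *out = MEM_TARGET_VM;   return 0;
	case ABI_TARGET_VRAM: *out = MEM_TARGET_VRAM; return 0;
	case ABI_TARGET_PCI:  *out = MEM_TARGET_PCI;  return 0;
	default:              return -EINVAL;	/* unknown ABI value */
	}
}

int main(void)
{
	enum mem_target t;
	int ret = map_target(ABI_TARGET_VRAM, &t);

	printf("ret %d target %d\n", ret, (int)t);
	return ret;
}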
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 027d8217c0fa..20c9dbfe3b2e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -23,121 +23,143 @@
*/
#include <core/gpuobj.h>
-#include <core/class.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
#include <subdev/vm/nv04.h>
-#include <engine/dmaobj.h>
+#include "priv.h"
-struct nv04_dmaeng_priv {
- struct nouveau_dmaeng base;
+struct nv04_dmaobj_priv {
+ struct nouveau_dmaobj base;
+ bool clone;
+ u32 flags0;
+ u32 flags2;
};
static int
-nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+nv04_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaeng);
+ struct nv04_dmaobj_priv *priv = (void *)dmaobj;
struct nouveau_gpuobj *gpuobj;
- u32 flags0 = nv_mclass(dmaobj);
- u32 flags2 = 0x00000000;
- u64 offset = dmaobj->start & 0xfffff000;
- u64 adjust = dmaobj->start & 0x00000fff;
- u32 length = dmaobj->limit - dmaobj->start;
+ u64 offset = priv->base.start & 0xfffff000;
+ u64 adjust = priv->base.start & 0x00000fff;
+ u32 length = priv->base.limit - priv->base.start;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
switch (nv_mclass(parent->parent)) {
- case NV03_CHANNEL_DMA_CLASS:
- case NV10_CHANNEL_DMA_CLASS:
- case NV17_CHANNEL_DMA_CLASS:
- case NV40_CHANNEL_DMA_CLASS:
+ case NV03_CHANNEL_DMA:
+ case NV10_CHANNEL_DMA:
+ case NV17_CHANNEL_DMA:
+ case NV40_CHANNEL_DMA:
break;
default:
return -EINVAL;
}
}
- if (dmaobj->target == NV_MEM_TARGET_VM) {
- if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
- struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
- if (!dmaobj->start)
- return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
- offset = nv_ro32(pgt, 8 + (offset >> 10));
- offset &= 0xfffff000;
- }
+ if (priv->clone) {
+ struct nv04_vmmgr_priv *vmm = nv04_vmmgr(dmaobj);
+ struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
+ if (!dmaobj->start)
+ return nouveau_gpuobj_dup(parent, pgt, pgpuobj);
+ offset = nv_ro32(pgt, 8 + (offset >> 10));
+ offset &= 0xfffff000;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
+ *pgpuobj = gpuobj;
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, priv->flags0 | (adjust << 20));
+ nv_wo32(*pgpuobj, 0x04, length);
+ nv_wo32(*pgpuobj, 0x08, priv->flags2 | offset);
+ nv_wo32(*pgpuobj, 0x0c, priv->flags2 | offset);
+ }
+
+ return ret;
+}
+
+static int
+nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nv04_vmmgr_priv *vmm = nv04_vmmgr(engine);
+ struct nv04_dmaobj_priv *priv;
+ int ret;
+
+ ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
+ *pobject = nv_object(priv);
+ if (ret || (ret = -ENOSYS, size))
+ return ret;
- dmaobj->target = NV_MEM_TARGET_PCI;
- dmaobj->access = NV_MEM_ACCESS_RW;
+ if (priv->base.target == NV_MEM_TARGET_VM) {
+ if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass)
+ priv->clone = true;
+ priv->base.target = NV_MEM_TARGET_PCI;
+ priv->base.access = NV_MEM_ACCESS_RW;
}
- switch (dmaobj->target) {
+ priv->flags0 = nv_mclass(priv);
+ switch (priv->base.target) {
case NV_MEM_TARGET_VRAM:
- flags0 |= 0x00003000;
+ priv->flags0 |= 0x00003000;
break;
case NV_MEM_TARGET_PCI:
- flags0 |= 0x00023000;
+ priv->flags0 |= 0x00023000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags0 |= 0x00033000;
+ priv->flags0 |= 0x00033000;
break;
default:
return -EINVAL;
}
- switch (dmaobj->access) {
+ switch (priv->base.access) {
case NV_MEM_ACCESS_RO:
- flags0 |= 0x00004000;
+ priv->flags0 |= 0x00004000;
break;
case NV_MEM_ACCESS_WO:
- flags0 |= 0x00008000;
+ priv->flags0 |= 0x00008000;
case NV_MEM_ACCESS_RW:
- flags2 |= 0x00000002;
+ priv->flags2 |= 0x00000002;
break;
default:
return -EINVAL;
}
- ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
- *pgpuobj = gpuobj;
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
- nv_wo32(*pgpuobj, 0x04, length);
- nv_wo32(*pgpuobj, 0x08, flags2 | offset);
- nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
- }
-
- return ret;
+ return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static int
-nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv04_dmaeng_priv *priv;
- int ret;
-
- ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static struct nouveau_ofuncs
+nv04_dmaobj_ofuncs = {
+ .ctor = nv04_dmaobj_ctor,
+ .dtor = _nvkm_dmaobj_dtor,
+ .init = _nvkm_dmaobj_init,
+ .fini = _nvkm_dmaobj_fini,
+};
- nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
- priv->base.bind = nv04_dmaobj_bind;
- return 0;
-}
+static struct nouveau_oclass
+nv04_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &nv04_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &nv04_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &nv04_dmaobj_ofuncs },
+ {}
+};
-struct nouveau_oclass
-nv04_dmaeng_oclass = {
- .handle = NV_ENGINE(DMAOBJ, 0x04),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv04_dmaeng_ctor,
- .dtor = _nouveau_dmaeng_dtor,
- .init = _nouveau_dmaeng_init,
- .fini = _nouveau_dmaeng_fini,
+struct nouveau_oclass *
+nv04_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+ .base.handle = NV_ENGINE(DMAOBJ, 0x04),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_dmaeng_ctor,
+ .dtor = _nvkm_dmaeng_dtor,
+ .init = _nvkm_dmaeng_init,
+ .fini = _nvkm_dmaeng_fini,
},
-};
+ .sclass = nv04_dmaeng_sclass,
+ .bind = nv04_dmaobj_bind,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 750183f7c057..a740ddba2ee2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -22,140 +22,176 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/gpuobj.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
-#include <engine/dmaobj.h>
-struct nv50_dmaeng_priv {
- struct nouveau_dmaeng base;
+#include "priv.h"
+
+struct nv50_dmaobj_priv {
+ struct nouveau_dmaobj base;
+ u32 flags0;
+ u32 flags5;
};
static int
-nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+nv50_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags0 = nv_mclass(dmaobj);
- u32 flags5 = 0x00000000;
+ struct nv50_dmaobj_priv *priv = (void *)dmaobj;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
switch (nv_mclass(parent->parent)) {
- case NV50_CHANNEL_DMA_CLASS:
- case NV84_CHANNEL_DMA_CLASS:
- case NV50_CHANNEL_IND_CLASS:
- case NV84_CHANNEL_IND_CLASS:
- case NV50_DISP_MAST_CLASS:
- case NV84_DISP_MAST_CLASS:
- case NV94_DISP_MAST_CLASS:
- case NVA0_DISP_MAST_CLASS:
- case NVA3_DISP_MAST_CLASS:
- case NV50_DISP_SYNC_CLASS:
- case NV84_DISP_SYNC_CLASS:
- case NV94_DISP_SYNC_CLASS:
- case NVA0_DISP_SYNC_CLASS:
- case NVA3_DISP_SYNC_CLASS:
- case NV50_DISP_OVLY_CLASS:
- case NV84_DISP_OVLY_CLASS:
- case NV94_DISP_OVLY_CLASS:
- case NVA0_DISP_OVLY_CLASS:
- case NVA3_DISP_OVLY_CLASS:
+ case NV40_CHANNEL_DMA:
+ case NV50_CHANNEL_GPFIFO:
+ case G82_CHANNEL_GPFIFO:
+ case NV50_DISP_CORE_CHANNEL_DMA:
+ case G82_DISP_CORE_CHANNEL_DMA:
+ case GT206_DISP_CORE_CHANNEL_DMA:
+ case GT200_DISP_CORE_CHANNEL_DMA:
+ case GT214_DISP_CORE_CHANNEL_DMA:
+ case NV50_DISP_BASE_CHANNEL_DMA:
+ case G82_DISP_BASE_CHANNEL_DMA:
+ case GT200_DISP_BASE_CHANNEL_DMA:
+ case GT214_DISP_BASE_CHANNEL_DMA:
+ case NV50_DISP_OVERLAY_CHANNEL_DMA:
+ case G82_DISP_OVERLAY_CHANNEL_DMA:
+ case GT200_DISP_OVERLAY_CHANNEL_DMA:
+ case GT214_DISP_OVERLAY_CHANNEL_DMA:
break;
default:
return -EINVAL;
}
}
- if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
- if (dmaobj->target == NV_MEM_TARGET_VM) {
- dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM;
- dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
- dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
- dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
+ upper_32_bits(priv->base.start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, priv->flags5);
+ }
+
+ return ret;
+}
+
+static int
+nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ union {
+ struct nv50_dma_v0 v0;
+ } *args;
+ struct nv50_dmaobj_priv *priv;
+ u32 user, part, comp, kind;
+ int ret;
+
+ ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+ args = data;
+
+ nv_ioctl(parent, "create nv50 dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create nv50 dma vers %d priv %d part %d "
+ "comp %d kind %02x\n", args->v0.version,
+ args->v0.priv, args->v0.part, args->v0.comp,
+ args->v0.kind);
+ user = args->v0.priv;
+ part = args->v0.part;
+ comp = args->v0.comp;
+ kind = args->v0.kind;
+ } else
+ if (size == 0) {
+ if (priv->base.target != NV_MEM_TARGET_VM) {
+ user = NV50_DMA_V0_PRIV_US;
+ part = NV50_DMA_V0_PART_256;
+ comp = NV50_DMA_V0_COMP_NONE;
+ kind = NV50_DMA_V0_KIND_PITCH;
} else {
- dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US;
- dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
- dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
- dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+ user = NV50_DMA_V0_PRIV_VM;
+ part = NV50_DMA_V0_PART_VM;
+ comp = NV50_DMA_V0_COMP_VM;
+ kind = NV50_DMA_V0_KIND_VM;
}
- }
+ } else
+ return ret;
- flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
- flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
- flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
- flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+ if (user > 2 || part > 2 || comp > 3 || kind > 0x7f)
+ return -EINVAL;
+ priv->flags0 = (comp << 29) | (kind << 22) | (user << 20);
+ priv->flags5 = (part << 16);
- switch (dmaobj->target) {
+ switch (priv->base.target) {
case NV_MEM_TARGET_VM:
- flags0 |= 0x00000000;
+ priv->flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
- flags0 |= 0x00010000;
+ priv->flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
- flags0 |= 0x00020000;
+ priv->flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags0 |= 0x00030000;
+ priv->flags0 |= 0x00030000;
break;
default:
return -EINVAL;
}
- switch (dmaobj->access) {
+ switch (priv->base.access) {
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
- flags0 |= 0x00040000;
+ priv->flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
- flags0 |= 0x00080000;
+ priv->flags0 |= 0x00080000;
break;
+ default:
+ return -EINVAL;
}
- ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags0);
- nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
- nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
- nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
- upper_32_bits(dmaobj->start));
- nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, flags5);
- }
-
- return ret;
+ return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static int
-nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv50_dmaeng_priv *priv;
- int ret;
-
- ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static struct nouveau_ofuncs
+nv50_dmaobj_ofuncs = {
+ .ctor = nv50_dmaobj_ctor,
+ .dtor = _nvkm_dmaobj_dtor,
+ .init = _nvkm_dmaobj_init,
+ .fini = _nvkm_dmaobj_fini,
+};
- nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
- priv->base.bind = nv50_dmaobj_bind;
- return 0;
-}
+static struct nouveau_oclass
+nv50_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &nv50_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &nv50_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &nv50_dmaobj_ofuncs },
+ {}
+};
-struct nouveau_oclass
-nv50_dmaeng_oclass = {
- .handle = NV_ENGINE(DMAOBJ, 0x50),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv50_dmaeng_ctor,
- .dtor = _nouveau_dmaeng_dtor,
- .init = _nouveau_dmaeng_init,
- .fini = _nouveau_dmaeng_fini,
+struct nouveau_oclass *
+nv50_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+ .base.handle = NV_ENGINE(DMAOBJ, 0x50),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_dmaeng_ctor,
+ .dtor = _nvkm_dmaeng_dtor,
+ .init = _nvkm_dmaeng_init,
+ .fini = _nvkm_dmaeng_fini,
},
-};
+ .sclass = nv50_dmaeng_sclass,
+ .bind = nv50_dmaobj_bind,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index cd3970d03b80..88ec33b20048 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,32 +22,35 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/device.h>
#include <core/gpuobj.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
-#include <engine/dmaobj.h>
-struct nvc0_dmaeng_priv {
- struct nouveau_dmaeng base;
+#include "priv.h"
+
+struct nvc0_dmaobj_priv {
+ struct nouveau_dmaobj base;
+ u32 flags0;
+ u32 flags5;
};
static int
-nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+nvc0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags0 = nv_mclass(dmaobj);
- u32 flags5 = 0x00000000;
+ struct nvc0_dmaobj_priv *priv = (void *)dmaobj;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
switch (nv_mclass(parent->parent)) {
- case NVA3_DISP_MAST_CLASS:
- case NVA3_DISP_SYNC_CLASS:
- case NVA3_DISP_OVLY_CLASS:
+ case GT214_DISP_CORE_CHANNEL_DMA:
+ case GT214_DISP_BASE_CHANNEL_DMA:
+ case GT214_DISP_OVERLAY_CHANNEL_DMA:
break;
default:
return -EINVAL;
@@ -55,89 +58,122 @@ nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
} else
return 0;
- if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
- if (dmaobj->target == NV_MEM_TARGET_VM) {
- dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM;
- dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
+ upper_32_bits(priv->base.start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, priv->flags5);
+ }
+
+ return ret;
+}
+
+static int
+nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ union {
+ struct gf100_dma_v0 v0;
+ } *args;
+ struct nvc0_dmaobj_priv *priv;
+ u32 kind, user, unkn;
+ int ret;
+
+ ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+ args = data;
+
+ nv_ioctl(parent, "create gf100 dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create gf100 dma vers %d priv %d kind %02x\n",
+ args->v0.version, args->v0.priv, args->v0.kind);
+ kind = args->v0.kind;
+ user = args->v0.priv;
+ unkn = 0;
+ } else
+ if (size == 0) {
+ if (priv->base.target != NV_MEM_TARGET_VM) {
+ kind = GF100_DMA_V0_KIND_PITCH;
+ user = GF100_DMA_V0_PRIV_US;
+ unkn = 2;
} else {
- dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US;
- dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
- dmaobj->conf0 |= 0x00020000;
+ kind = GF100_DMA_V0_KIND_VM;
+ user = GF100_DMA_V0_PRIV_VM;
+ unkn = 0;
}
- }
+ } else
+ return ret;
- flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
- flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
- flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+ if (user > 2)
+ return -EINVAL;
+ priv->flags0 |= (kind << 22) | (user << 20);
+ priv->flags5 |= (unkn << 16);
- switch (dmaobj->target) {
+ switch (priv->base.target) {
case NV_MEM_TARGET_VM:
- flags0 |= 0x00000000;
+ priv->flags0 |= 0x00000000;
break;
case NV_MEM_TARGET_VRAM:
- flags0 |= 0x00010000;
+ priv->flags0 |= 0x00010000;
break;
case NV_MEM_TARGET_PCI:
- flags0 |= 0x00020000;
+ priv->flags0 |= 0x00020000;
break;
case NV_MEM_TARGET_PCI_NOSNOOP:
- flags0 |= 0x00030000;
+ priv->flags0 |= 0x00030000;
break;
default:
return -EINVAL;
}
- switch (dmaobj->access) {
+ switch (priv->base.access) {
case NV_MEM_ACCESS_VM:
break;
case NV_MEM_ACCESS_RO:
- flags0 |= 0x00040000;
+ priv->flags0 |= 0x00040000;
break;
case NV_MEM_ACCESS_WO:
case NV_MEM_ACCESS_RW:
- flags0 |= 0x00080000;
+ priv->flags0 |= 0x00080000;
break;
}
- ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
- if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags0);
- nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
- nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
- nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
- upper_32_bits(dmaobj->start));
- nv_wo32(*pgpuobj, 0x10, 0x00000000);
- nv_wo32(*pgpuobj, 0x14, flags5);
- }
-
- return ret;
+ return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-static int
-nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_dmaeng_priv *priv;
- int ret;
-
- ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
+static struct nouveau_ofuncs
+nvc0_dmaobj_ofuncs = {
+ .ctor = nvc0_dmaobj_ctor,
+ .dtor = _nvkm_dmaobj_dtor,
+ .init = _nvkm_dmaobj_init,
+ .fini = _nvkm_dmaobj_fini,
+};
- nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
- priv->base.bind = nvc0_dmaobj_bind;
- return 0;
-}
+static struct nouveau_oclass
+nvc0_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &nvc0_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &nvc0_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &nvc0_dmaobj_ofuncs },
+ {}
+};
-struct nouveau_oclass
-nvc0_dmaeng_oclass = {
- .handle = NV_ENGINE(DMAOBJ, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_dmaeng_ctor,
- .dtor = _nouveau_dmaeng_dtor,
- .init = _nouveau_dmaeng_init,
- .fini = _nouveau_dmaeng_fini,
+struct nouveau_oclass *
+nvc0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+ .base.handle = NV_ENGINE(DMAOBJ, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_dmaeng_ctor,
+ .dtor = _nvkm_dmaeng_dtor,
+ .init = _nvkm_dmaeng_init,
+ .fini = _nvkm_dmaeng_fini,
},
-};
+ .sclass = nvc0_dmaeng_sclass,
+ .bind = nvc0_dmaobj_bind,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index 1cfb3bb90131..3fc4f0b0eaca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -22,40 +22,40 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/device.h>
#include <core/gpuobj.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/fb.h>
-#include <engine/dmaobj.h>
-struct nvd0_dmaeng_priv {
- struct nouveau_dmaeng base;
+#include "priv.h"
+
+struct nvd0_dmaobj_priv {
+ struct nouveau_dmaobj base;
+ u32 flags0;
};
static int
-nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **pgpuobj)
{
- u32 flags0 = 0x00000000;
+ struct nvd0_dmaobj_priv *priv = (void *)dmaobj;
int ret;
if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
switch (nv_mclass(parent->parent)) {
- case NVD0_DISP_MAST_CLASS:
- case NVD0_DISP_SYNC_CLASS:
- case NVD0_DISP_OVLY_CLASS:
- case NVE0_DISP_MAST_CLASS:
- case NVE0_DISP_SYNC_CLASS:
- case NVE0_DISP_OVLY_CLASS:
- case NVF0_DISP_MAST_CLASS:
- case NVF0_DISP_SYNC_CLASS:
- case NVF0_DISP_OVLY_CLASS:
- case GM107_DISP_MAST_CLASS:
- case GM107_DISP_SYNC_CLASS:
- case GM107_DISP_OVLY_CLASS:
+ case GF110_DISP_CORE_CHANNEL_DMA:
+ case GK104_DISP_CORE_CHANNEL_DMA:
+ case GK110_DISP_CORE_CHANNEL_DMA:
+ case GM107_DISP_CORE_CHANNEL_DMA:
+ case GF110_DISP_BASE_CHANNEL_DMA:
+ case GK104_DISP_BASE_CHANNEL_DMA:
+ case GK110_DISP_BASE_CHANNEL_DMA:
+ case GF110_DISP_OVERLAY_CONTROL_DMA:
+ case GK104_DISP_OVERLAY_CONTROL_DMA:
break;
default:
return -EINVAL;
@@ -63,33 +63,11 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
} else
return 0;
- if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
- if (dmaobj->target == NV_MEM_TARGET_VM) {
- dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
- dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
- } else {
- dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
- dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
- }
- }
-
- flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
- flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
-
- switch (dmaobj->target) {
- case NV_MEM_TARGET_VRAM:
- flags0 |= 0x00000009;
- break;
- default:
- return -EINVAL;
- break;
- }
-
ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
if (ret == 0) {
- nv_wo32(*pgpuobj, 0x00, flags0);
- nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
- nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
+ nv_wo32(*pgpuobj, 0x00, priv->flags0);
+ nv_wo32(*pgpuobj, 0x04, priv->base.start >> 8);
+ nv_wo32(*pgpuobj, 0x08, priv->base.limit >> 8);
nv_wo32(*pgpuobj, 0x0c, 0x00000000);
nv_wo32(*pgpuobj, 0x10, 0x00000000);
nv_wo32(*pgpuobj, 0x14, 0x00000000);
@@ -99,30 +77,91 @@ nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
}
static int
-nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+nvd0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nvd0_dmaeng_priv *priv;
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ union {
+ struct gf110_dma_v0 v0;
+ } *args;
+ struct nvd0_dmaobj_priv *priv;
+ u32 kind, page;
int ret;
- ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ ret = nvkm_dmaobj_create(parent, engine, oclass, &data, &size, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
+ args = data;
- nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
- priv->base.bind = nvd0_dmaobj_bind;
- return 0;
+ nv_ioctl(parent, "create gf110 dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create gf110 dma vers %d page %d kind %02x\n",
+ args->v0.version, args->v0.page, args->v0.kind);
+ kind = args->v0.kind;
+ page = args->v0.page;
+ } else
+ if (size == 0) {
+ if (priv->base.target != NV_MEM_TARGET_VM) {
+ kind = GF110_DMA_V0_KIND_PITCH;
+ page = GF110_DMA_V0_PAGE_SP;
+ } else {
+ kind = GF110_DMA_V0_KIND_VM;
+ page = GF110_DMA_V0_PAGE_LP;
+ }
+ } else
+ return ret;
+
+ if (page > 1)
+ return -EINVAL;
+ priv->flags0 = (kind << 20) | (page << 6);
+
+ switch (priv->base.target) {
+ case NV_MEM_TARGET_VRAM:
+ priv->flags0 |= 0x00000009;
+ break;
+ case NV_MEM_TARGET_VM:
+ case NV_MEM_TARGET_PCI:
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ /* XXX: don't currently know how to construct a real one
+ * of these. we only use them to represent pushbufs
+ * on these chipsets, and the classes that use them
+ * deal with the target themselves.
+ */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return dmaeng->bind(&priv->base, nv_object(priv), (void *)pobject);
}
-struct nouveau_oclass
-nvd0_dmaeng_oclass = {
- .handle = NV_ENGINE(DMAOBJ, 0xd0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_dmaeng_ctor,
- .dtor = _nouveau_dmaeng_dtor,
- .init = _nouveau_dmaeng_init,
- .fini = _nouveau_dmaeng_fini,
- },
+static struct nouveau_ofuncs
+nvd0_dmaobj_ofuncs = {
+ .ctor = nvd0_dmaobj_ctor,
+ .dtor = _nvkm_dmaobj_dtor,
+ .init = _nvkm_dmaobj_init,
+ .fini = _nvkm_dmaobj_fini,
};
+
+static struct nouveau_oclass
+nvd0_dmaeng_sclass[] = {
+ { NV_DMA_FROM_MEMORY, &nvd0_dmaobj_ofuncs },
+ { NV_DMA_TO_MEMORY, &nvd0_dmaobj_ofuncs },
+ { NV_DMA_IN_MEMORY, &nvd0_dmaobj_ofuncs },
+ {}
+};
+
+struct nouveau_oclass *
+nvd0_dmaeng_oclass = &(struct nvkm_dmaeng_impl) {
+ .base.handle = NV_ENGINE(DMAOBJ, 0xd0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nvkm_dmaeng_ctor,
+ .dtor = _nvkm_dmaeng_dtor,
+ .init = _nvkm_dmaeng_init,
+ .fini = _nvkm_dmaeng_fini,
+ },
+ .sclass = nvd0_dmaeng_sclass,
+ .bind = nvd0_dmaobj_bind,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h
new file mode 100644
index 000000000000..36f743866937
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/priv.h
@@ -0,0 +1,30 @@
+#ifndef __NVKM_DMAOBJ_PRIV_H__
+#define __NVKM_DMAOBJ_PRIV_H__
+
+#include <engine/dmaobj.h>
+
+#define nvkm_dmaobj_create(p,e,c,pa,sa,d) \
+ nvkm_dmaobj_create_((p), (e), (c), (pa), (sa), sizeof(**d), (void **)d)
+
+int nvkm_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void **, u32 *,
+ int, void **);
+#define _nvkm_dmaobj_dtor nouveau_object_destroy
+#define _nvkm_dmaobj_init nouveau_object_init
+#define _nvkm_dmaobj_fini nouveau_object_fini
+
+int _nvkm_dmaeng_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+#define _nvkm_dmaeng_dtor _nouveau_engine_dtor
+#define _nvkm_dmaeng_init _nouveau_engine_init
+#define _nvkm_dmaeng_fini _nouveau_engine_fini
+
+struct nvkm_dmaeng_impl {
+ struct nouveau_oclass base;
+ struct nouveau_oclass *sclass;
+ int (*bind)(struct nouveau_dmaobj *, struct nouveau_object *,
+ struct nouveau_gpuobj **);
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index 56ed3d73bf8e..0f999fc45ab9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -26,11 +26,30 @@
#include <core/object.h>
#include <core/handle.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/event.h>
#include <engine/dmaobj.h>
#include <engine/fifo.h>
+static int
+nouveau_fifo_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ if (size == 0) {
+ notify->size = 0;
+ notify->types = 1;
+ notify->index = 0;
+ return 0;
+ }
+ return -ENOSYS;
+}
+
+static const struct nvkm_event_func
+nouveau_fifo_event_func = {
+ .ctor = nouveau_fifo_event_ctor,
+};
+
int
nouveau_fifo_channel_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
@@ -59,14 +78,14 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
dmaeng = (void *)chan->pushdma->base.engine;
switch (chan->pushdma->base.oclass->handle) {
- case NV_DMA_FROM_MEMORY_CLASS:
- case NV_DMA_IN_MEMORY_CLASS:
+ case NV_DMA_FROM_MEMORY:
+ case NV_DMA_IN_MEMORY:
break;
default:
return -EINVAL;
}
- ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+ ret = dmaeng->bind(chan->pushdma, parent, &chan->pushgpu);
if (ret)
return ret;
@@ -85,15 +104,10 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
return -ENOSPC;
}
- /* map fifo control registers */
- chan->user = ioremap(nv_device_resource_start(device, bar) + addr +
- (chan->chid * size), size);
- if (!chan->user)
- return -EFAULT;
-
- nouveau_event_trigger(priv->cevent, 1, 0);
-
+ chan->addr = nv_device_resource_start(device, bar) +
+ addr + size * chan->chid;
chan->size = size;
+ nvkm_event_send(&priv->cevent, 1, 0, NULL, 0);
return 0;
}
@@ -103,7 +117,8 @@ nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
unsigned long flags;
- iounmap(chan->user);
+ if (chan->user)
+ iounmap(chan->user);
spin_lock_irqsave(&priv->lock, flags);
priv->channel[chan->chid] = NULL;
@@ -121,10 +136,24 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
nouveau_fifo_channel_destroy(chan);
}
+int
+_nouveau_fifo_channel_map(struct nouveau_object *object, u64 *addr, u32 *size)
+{
+ struct nouveau_fifo_chan *chan = (void *)object;
+ *addr = chan->addr;
+ *size = chan->size;
+ return 0;
+}
+
u32
_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
{
struct nouveau_fifo_chan *chan = (void *)object;
+ if (unlikely(!chan->user)) {
+ chan->user = ioremap(chan->addr, chan->size);
+ if (WARN_ON_ONCE(chan->user == NULL))
+ return 0;
+ }
return ioread32_native(chan->user + addr);
}
@@ -132,9 +161,57 @@ void
_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
{
struct nouveau_fifo_chan *chan = (void *)object;
+ if (unlikely(!chan->user)) {
+ chan->user = ioremap(chan->addr, chan->size);
+ if (WARN_ON_ONCE(chan->user == NULL))
+ return;
+ }
iowrite32_native(data, chan->user + addr);
}
+int
+nouveau_fifo_uevent_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ union {
+ struct nvif_notify_uevent_req none;
+ } *req = data;
+ int ret;
+
+ if (nvif_unvers(req->none)) {
+ notify->size = sizeof(struct nvif_notify_uevent_rep);
+ notify->types = 1;
+ notify->index = 0;
+ }
+
+ return ret;
+}
+
+void
+nouveau_fifo_uevent(struct nouveau_fifo *fifo)
+{
+ struct nvif_notify_uevent_rep rep = {
+ };
+ nvkm_event_send(&fifo->uevent, 1, 0, &rep, sizeof(rep));
+}
+
+int
+_nouveau_fifo_channel_ntfy(struct nouveau_object *object, u32 type,
+ struct nvkm_event **event)
+{
+ struct nouveau_fifo *fifo = (void *)object->engine;
+ switch (type) {
+ case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
+ if (nv_mclass(object) >= G82_CHANNEL_DMA) {
+ *event = &fifo->uevent;
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
static int
nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
{
@@ -168,8 +245,8 @@ void
nouveau_fifo_destroy(struct nouveau_fifo *priv)
{
kfree(priv->channel);
- nouveau_event_destroy(&priv->uevent);
- nouveau_event_destroy(&priv->cevent);
+ nvkm_event_fini(&priv->uevent);
+ nvkm_event_fini(&priv->cevent);
nouveau_engine_destroy(&priv->base);
}
@@ -194,11 +271,7 @@ nouveau_fifo_create_(struct nouveau_object *parent,
if (!priv->channel)
return -ENOMEM;
- ret = nouveau_event_create(1, 1, &priv->cevent);
- if (ret)
- return ret;
-
- ret = nouveau_event_create(1, 1, &priv->uevent);
+ ret = nvkm_event_init(&nouveau_fifo_event_func, 1, 1, &priv->cevent);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index c61b16a63884..5ae6a43893b5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/engctx.h>
#include <core/namedb.h>
#include <core/handle.h>
@@ -117,16 +118,23 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->pushbuf,
+ 0x10000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR), &chan);
@@ -134,13 +142,15 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 32;
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x10,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -242,13 +252,15 @@ nv04_fifo_ofuncs = {
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv04_fifo_sclass[] = {
- { NV03_CHANNEL_DMA_CLASS, &nv04_fifo_ofuncs },
+ { NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
{}
};
@@ -539,7 +551,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (status & 0x40000000) {
- nouveau_event_trigger(priv->base.uevent, 1, 0);
+ nouveau_fifo_uevent(&priv->base);
nv_wr32(priv, 0x002100, 0x40000000);
status &= ~0x40000000;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 571a22aa1ae5..2a32add51c81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
@@ -59,16 +60,23 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->pushbuf,
+ 0x10000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR), &chan);
@@ -76,13 +84,15 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 32;
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -100,13 +110,15 @@ nv10_fifo_ofuncs = {
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv10_fifo_sclass[] = {
- { NV10_CHANNEL_DMA_CLASS, &nv10_fifo_ofuncs },
+ { NV10_CHANNEL_DMA, &nv10_fifo_ofuncs },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index f25760209316..12d76c8adb23 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
@@ -64,16 +65,23 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
- 0x10000, args->pushbuf,
+ 0x10000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -83,13 +91,15 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->object_attach = nv04_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 64;
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -107,13 +117,15 @@ nv17_fifo_ofuncs = {
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv17_fifo_sclass[] = {
- { NV17_CHANNEL_DMA_CLASS, &nv17_fifo_ofuncs },
+ { NV17_CHANNEL_DMA, &nv17_fifo_ofuncs },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 343487ed2238..9f49c3a24dc6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -22,8 +22,9 @@
* Authors: Ben Skeggs
*/
-#include <core/os.h>
-#include <core/class.h>
+#include <core/client.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/engctx.h>
#include <core/ramht.h>
@@ -182,16 +183,23 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nv04_fifo_priv *priv = (void *)engine;
struct nv04_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x1000, args->pushbuf,
+ 0x1000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -200,14 +208,16 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nv40_fifo_context_attach;
nv_parent(chan)->context_detach = nv40_fifo_context_detach;
nv_parent(chan)->object_attach = nv40_fifo_object_attach;
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
chan->ramfc = chan->base.chid * 128;
- nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
- nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
@@ -226,13 +236,15 @@ nv40_fifo_ofuncs = {
.dtor = nv04_fifo_chan_dtor,
.init = nv04_fifo_chan_init,
.fini = nv04_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv40_fifo_sclass[] = {
- { NV40_CHANNEL_DMA_CLASS, &nv40_fifo_ofuncs },
+ { NV40_CHANNEL_DMA, &nv40_fifo_ofuncs },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index e6352bd5b4ff..5d1e86bc244c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -25,7 +25,8 @@
#include <core/client.h>
#include <core/engctx.h>
#include <core/ramht.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
@@ -194,17 +195,24 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->pushbuf,
+ 0x2000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -213,6 +221,8 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
@@ -223,10 +233,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
if (ret)
return ret;
- nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x3c, 0x003f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
@@ -247,18 +257,26 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct nv50_channel_ind_class *args = data;
+ union {
+ struct nv50_channel_gpfifo_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
u64 ioffset, ilength;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
+ "ioffset %016llx ilength %08x\n",
+ args->v0.version, args->v0.pushbuf, args->v0.ioffset,
+ args->v0.ilength);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->pushbuf,
+ 0x2000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -267,6 +285,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nv50_fifo_context_attach;
nv_parent(chan)->context_detach = nv50_fifo_context_detach;
nv_parent(chan)->object_attach = nv50_fifo_object_attach;
@@ -277,8 +297,8 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
if (ret)
return ret;
- ioffset = args->ioffset;
- ilength = order_base_2(args->ilength / 8);
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
@@ -343,8 +363,10 @@ nv50_fifo_ofuncs_dma = {
.dtor = nv50_fifo_chan_dtor,
.init = nv50_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_ofuncs
@@ -353,14 +375,16 @@ nv50_fifo_ofuncs_ind = {
.dtor = nv50_fifo_chan_dtor,
.init = nv50_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv50_fifo_sclass[] = {
- { NV50_CHANNEL_DMA_CLASS, &nv50_fifo_ofuncs_dma },
- { NV50_CHANNEL_IND_CLASS, &nv50_fifo_ofuncs_ind },
+ { NV50_CHANNEL_DMA, &nv50_fifo_ofuncs_dma },
+ { NV50_CHANNEL_GPFIFO, &nv50_fifo_ofuncs_ind },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 6e5ac16e5460..1f42996b354a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -27,7 +27,8 @@
#include <core/engctx.h>
#include <core/ramht.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
@@ -160,17 +161,24 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv03_channel_dma_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
- struct nv03_channel_dma_class *args = data;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel dma size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel dma vers %d pushbuf %08x "
+ "offset %016llx\n", args->v0.version,
+ args->v0.pushbuf, args->v0.offset);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->pushbuf,
+ 0x2000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -186,6 +194,8 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
&chan->ramht);
if (ret)
@@ -196,10 +206,10 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
nv_parent(chan)->object_attach = nv84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
- nv_wo32(base->ramfc, 0x08, lower_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x10, lower_32_bits(args->offset));
- nv_wo32(base->ramfc, 0x14, upper_32_bits(args->offset));
+ nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
+ nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x3c, 0x003f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
@@ -222,18 +232,26 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv50_channel_gpfifo_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nv50_fifo_base *base = (void *)parent;
struct nv50_fifo_chan *chan;
- struct nv50_channel_ind_class *args = data;
u64 ioffset, ilength;
int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
+ "ioffset %016llx ilength %08x\n",
+ args->v0.version, args->v0.pushbuf, args->v0.ioffset,
+ args->v0.ilength);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
- 0x2000, args->pushbuf,
+ 0x2000, args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_DMAOBJ) |
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
@@ -249,6 +267,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
ret = nouveau_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
&chan->ramht);
if (ret)
@@ -259,8 +279,8 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
nv_parent(chan)->object_attach = nv84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
- ioffset = args->ioffset;
- ilength = order_base_2(args->ilength / 8);
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
@@ -304,8 +324,10 @@ nv84_fifo_ofuncs_dma = {
.dtor = nv50_fifo_chan_dtor,
.init = nv84_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_ofuncs
@@ -314,14 +336,16 @@ nv84_fifo_ofuncs_ind = {
.dtor = nv50_fifo_chan_dtor,
.init = nv84_fifo_chan_init,
.fini = nv50_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nv84_fifo_sclass[] = {
- { NV84_CHANNEL_DMA_CLASS, &nv84_fifo_ofuncs_dma },
- { NV84_CHANNEL_IND_CLASS, &nv84_fifo_ofuncs_ind },
+ { G82_CHANNEL_DMA, &nv84_fifo_ofuncs_dma },
+ { G82_CHANNEL_GPFIFO, &nv84_fifo_ofuncs_ind },
{}
};
@@ -389,19 +413,26 @@ nv84_fifo_cclass = {
******************************************************************************/
static void
-nv84_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
+nv84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nv84_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x40000000, 0x40000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x40000000, 0x40000000);
}
static void
-nv84_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
+nv84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nv84_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x40000000, 0x00000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x40000000, 0x00000000);
}
+static const struct nvkm_event_func
+nv84_fifo_uevent_func = {
+ .ctor = nouveau_fifo_uevent_ctor,
+ .init = nv84_fifo_uevent_init,
+ .fini = nv84_fifo_uevent_fini,
+};
+
static int
nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -425,9 +456,9 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.uevent->enable = nv84_fifo_uevent_enable;
- priv->base.uevent->disable = nv84_fifo_uevent_disable;
- priv->base.uevent->priv = priv;
+ ret = nvkm_event_init(&nv84_fifo_uevent_func, 1, 1, &priv->base.uevent);
+ if (ret)
+ return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nv04_fifo_intr;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index ae4a4dc5642a..1fe1f8fbda0c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -28,7 +28,8 @@
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/enum.h>
#include <subdev/timer.h>
@@ -187,20 +188,28 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nv50_channel_gpfifo_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nvc0_fifo_priv *priv = (void *)engine;
struct nvc0_fifo_base *base = (void *)parent;
struct nvc0_fifo_chan *chan;
- struct nv50_channel_ind_class *args = data;
u64 usermem, ioffset, ilength;
int ret, i;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
+ "ioffset %016llx ilength %08x\n",
+ args->v0.version, args->v0.pushbuf, args->v0.ioffset,
+ args->v0.ilength);
+ } else
+ return ret;
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x1000,
- args->pushbuf,
+ args->v0.pushbuf,
(1ULL << NVDEV_ENGINE_SW) |
(1ULL << NVDEV_ENGINE_GR) |
(1ULL << NVDEV_ENGINE_COPY0) |
@@ -212,12 +221,14 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
usermem = chan->base.chid * 0x1000;
- ioffset = args->ioffset;
- ilength = order_base_2(args->ilength / 8);
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
for (i = 0; i < 0x1000; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
@@ -291,13 +302,15 @@ nvc0_fifo_ofuncs = {
.dtor = _nouveau_fifo_channel_dtor,
.init = nvc0_fifo_chan_init,
.fini = nvc0_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nvc0_fifo_sclass[] = {
- { NVC0_CHANNEL_IND_CLASS, &nvc0_fifo_ofuncs },
+ { FERMI_CHANNEL_GPFIFO, &nvc0_fifo_ofuncs },
{}
};
@@ -654,7 +667,7 @@ nvc0_fifo_intr_fault(struct nvc0_fifo_priv *priv, int unit)
object = engctx;
while (object) {
switch (nv_mclass(object)) {
- case NVC0_CHANNEL_IND_CLASS:
+ case FERMI_CHANNEL_GPFIFO:
nvc0_fifo_recover(priv, engine, (void *)object);
break;
}
@@ -730,7 +743,7 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
for (unkn = 0; unkn < 8; unkn++) {
u32 ints = (intr >> (unkn * 0x04)) & inte;
if (ints & 0x1) {
- nouveau_event_trigger(priv->base.uevent, 1, 0);
+ nouveau_fifo_uevent(&priv->base);
ints &= ~1;
}
if (ints) {
@@ -827,19 +840,26 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
}
static void
-nvc0_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
+nvc0_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nvc0_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
}
static void
-nvc0_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
+nvc0_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nvc0_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
}
+static const struct nvkm_event_func
+nvc0_fifo_uevent_func = {
+ .ctor = nouveau_fifo_uevent_ctor,
+ .init = nvc0_fifo_uevent_init,
+ .fini = nvc0_fifo_uevent_fini,
+};
+
static int
nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
@@ -877,9 +897,9 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.uevent->enable = nvc0_fifo_uevent_enable;
- priv->base.uevent->disable = nvc0_fifo_uevent_disable;
- priv->base.uevent->priv = priv;
+ ret = nvkm_event_init(&nvc0_fifo_uevent_func, 1, 1, &priv->base.uevent);
+ if (ret)
+ return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nvc0_fifo_intr;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 298063edb92d..d2f0fd39c145 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -28,7 +28,8 @@
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/event.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
#include <core/enum.h>
#include <subdev/timer.h>
@@ -216,46 +217,56 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct kepler_channel_gpfifo_a_v0 v0;
+ } *args = data;
struct nouveau_bar *bar = nouveau_bar(parent);
struct nve0_fifo_priv *priv = (void *)engine;
struct nve0_fifo_base *base = (void *)parent;
struct nve0_fifo_chan *chan;
- struct nve0_channel_ind_class *args = data;
u64 usermem, ioffset, ilength;
int ret, i;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create channel gpfifo vers %d pushbuf %08x "
+ "ioffset %016llx ilength %08x engine %08x\n",
+ args->v0.version, args->v0.pushbuf, args->v0.ioffset,
+ args->v0.ilength, args->v0.engine);
+ } else
+ return ret;
for (i = 0; i < FIFO_ENGINE_NR; i++) {
- if (args->engine & (1 << i)) {
+ if (args->v0.engine & (1 << i)) {
if (nouveau_engine(parent, fifo_engine[i].subdev)) {
- args->engine = (1 << i);
+ args->v0.engine = (1 << i);
break;
}
}
}
if (i == FIFO_ENGINE_NR) {
- nv_error(priv, "unsupported engines 0x%08x\n", args->engine);
+ nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine);
return -ENODEV;
}
ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
priv->user.bar.offset, 0x200,
- args->pushbuf,
+ args->v0.pushbuf,
fifo_engine[i].mask, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
+ args->v0.chid = chan->base.chid;
+
nv_parent(chan)->context_attach = nve0_fifo_context_attach;
nv_parent(chan)->context_detach = nve0_fifo_context_detach;
chan->engine = i;
usermem = chan->base.chid * 0x200;
- ioffset = args->ioffset;
- ilength = order_base_2(args->ilength / 8);
+ ioffset = args->v0.ioffset;
+ ilength = order_base_2(args->v0.ilength / 8);
for (i = 0; i < 0x200; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
@@ -325,13 +336,15 @@ nve0_fifo_ofuncs = {
.dtor = _nouveau_fifo_channel_dtor,
.init = nve0_fifo_chan_init,
.fini = nve0_fifo_chan_fini,
+ .map = _nouveau_fifo_channel_map,
.rd32 = _nouveau_fifo_channel_rd32,
.wr32 = _nouveau_fifo_channel_wr32,
+ .ntfy = _nouveau_fifo_channel_ntfy
};
static struct nouveau_oclass
nve0_fifo_sclass[] = {
- { NVE0_CHANNEL_IND_CLASS, &nve0_fifo_ofuncs },
+ { KEPLER_CHANNEL_GPFIFO_A, &nve0_fifo_ofuncs },
{}
};
@@ -769,7 +782,7 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
object = engctx;
while (object) {
switch (nv_mclass(object)) {
- case NVE0_CHANNEL_IND_CLASS:
+ case KEPLER_CHANNEL_GPFIFO_A:
nve0_fifo_recover(priv, engine, (void *)object);
break;
}
@@ -859,7 +872,7 @@ nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
static void
nve0_fifo_intr_engine(struct nve0_fifo_priv *priv)
{
- nouveau_event_trigger(priv->base.uevent, 1, 0);
+ nouveau_fifo_uevent(&priv->base);
}
static void
@@ -952,19 +965,26 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
static void
-nve0_fifo_uevent_enable(struct nouveau_event *event, int type, int index)
+nve0_fifo_uevent_init(struct nvkm_event *event, int type, int index)
{
- struct nve0_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x80000000, 0x80000000);
}
static void
-nve0_fifo_uevent_disable(struct nouveau_event *event, int type, int index)
+nve0_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
{
- struct nve0_fifo_priv *priv = event->priv;
- nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
+ struct nouveau_fifo *fifo = container_of(event, typeof(*fifo), uevent);
+ nv_mask(fifo, 0x002140, 0x80000000, 0x00000000);
}
+static const struct nvkm_event_func
+nve0_fifo_uevent_func = {
+ .ctor = nouveau_fifo_uevent_ctor,
+ .init = nve0_fifo_uevent_init,
+ .fini = nve0_fifo_uevent_fini,
+};
+
int
nve0_fifo_fini(struct nouveau_object *object, bool suspend)
{
@@ -1067,9 +1087,9 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- priv->base.uevent->enable = nve0_fifo_uevent_enable;
- priv->base.uevent->disable = nve0_fifo_uevent_disable;
- priv->base.uevent->priv = priv;
+ ret = nvkm_event_init(&nve0_fifo_uevent_func, 1, 1, &priv->base.uevent);
+ if (ret)
+ return ret;
nv_subdev(priv)->unit = 0x00000100;
nv_subdev(priv)->intr = nve0_fifo_intr;
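
The constructor above now takes a versioned argument union and validates it with nvif_unpack() before touching any field, logging what it understood. A hedged sketch of the same validate-then-use idea in plain C; unpack_v0() below is an illustrative stand-in, not the real nvif macro:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct chan_args_v0 {
            uint8_t  version;       /* must be 0 for this layout */
            uint8_t  chid;          /* filled in by the "constructor" */
            uint32_t pushbuf;
            uint64_t ioffset;
            uint32_t ilength;
    };

    /* Illustrative stand-in for a versioned unpack: check size and version
     * before trusting the rest of the structure. */
    static int unpack_v0(const void *data, size_t size, struct chan_args_v0 *out)
    {
            if (size < sizeof(*out))
                    return -22;     /* -EINVAL: argument block too small */
            memcpy(out, data, sizeof(*out));
            if (out->version != 0)
                    return -38;     /* -ENOSYS: unknown argument layout */
            return 0;
    }

    int main(void)
    {
            struct chan_args_v0 args = { .version = 0, .pushbuf = 0xbeef,
                                         .ioffset = 0x1000, .ilength = 0x200 };
            struct chan_args_v0 copy;
            if (unpack_v0(&args, sizeof(args), &copy) == 0)
                    printf("vers %d pushbuf %08x ioffset %llx ilength %08x\n",
                           copy.version, copy.pushbuf,
                           (unsigned long long)copy.ioffset, copy.ilength);
            return 0;
    }
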
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c
new file mode 100644
index 000000000000..3adb7fe91772
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk110b.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "ctxnvc0.h"
+
+/*******************************************************************************
+ * PGRAPH context register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+gk110b_grctx_init_sm_0[] = {
+ { 0x419e04, 1, 0x04, 0x00000000 },
+ { 0x419e08, 1, 0x04, 0x0000001d },
+ { 0x419e0c, 1, 0x04, 0x00000000 },
+ { 0x419e10, 1, 0x04, 0x00001c02 },
+ { 0x419e44, 1, 0x04, 0x0013eff2 },
+ { 0x419e48, 1, 0x04, 0x00000000 },
+ { 0x419e4c, 1, 0x04, 0x0000007f },
+ { 0x419e50, 2, 0x04, 0x00000000 },
+ { 0x419e58, 1, 0x04, 0x00000001 },
+ { 0x419e5c, 3, 0x04, 0x00000000 },
+ { 0x419e68, 1, 0x04, 0x00000002 },
+ { 0x419e6c, 12, 0x04, 0x00000000 },
+ { 0x419eac, 1, 0x04, 0x00001f8f },
+ { 0x419eb0, 1, 0x04, 0x0db00d2f },
+ { 0x419eb8, 1, 0x04, 0x00000000 },
+ { 0x419ec8, 1, 0x04, 0x0001304f },
+ { 0x419f30, 4, 0x04, 0x00000000 },
+ { 0x419f40, 1, 0x04, 0x00000018 },
+ { 0x419f44, 3, 0x04, 0x00000000 },
+ { 0x419f58, 1, 0x04, 0x00000000 },
+ { 0x419f70, 1, 0x04, 0x00006300 },
+ { 0x419f78, 1, 0x04, 0x000000eb },
+ { 0x419f7c, 1, 0x04, 0x00000404 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gk110b_grctx_pack_tpc[] = {
+ { nvd7_grctx_init_pe_0 },
+ { nvf0_grctx_init_tex_0 },
+ { nvf0_grctx_init_mpc_0 },
+ { nvf0_grctx_init_l1c_0 },
+ { gk110b_grctx_init_sm_0 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+struct nouveau_oclass *
+gk110b_grctx_oclass = &(struct nvc0_grctx_oclass) {
+ .base.handle = NV_ENGCTX(GR, 0xf1),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_context_ctor,
+ .dtor = nvc0_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = _nouveau_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+ .main = nve4_grctx_generate_main,
+ .unkn = nve4_grctx_generate_unkn,
+ .hub = nvf0_grctx_pack_hub,
+ .gpc = nvf0_grctx_pack_gpc,
+ .zcull = nvc0_grctx_pack_zcull,
+ .tpc = gk110b_grctx_pack_tpc,
+ .ppc = nvf0_grctx_pack_ppc,
+ .icmd = nvf0_grctx_pack_icmd,
+ .mthd = nvf0_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x600,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x648,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
index 224ee0287ab7..36fc9831cc93 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgk20a.c
@@ -41,7 +41,6 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nve4_grctx_generate_main,
- .mods = nve4_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nve4_grctx_pack_hub,
.gpc = nve4_grctx_pack_gpc,
@@ -50,4 +49,15 @@ gk20a_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nve4_grctx_pack_ppc,
.icmd = nve4_grctx_pack_icmd,
.mthd = gk20a_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .bundle_min_gpm_fifo_depth = 0x62,
+ .bundle_token_limit = 0x100,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x240,
+ .attrib_nr = 0x240,
+ .alpha_nr_max = 0x648 + (0x648 / 2),
+ .alpha_nr = 0x648,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
index b0d0fb2f4d08..62e918b9fa81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c
@@ -859,45 +859,74 @@ gm107_grctx_pack_ppc[] = {
******************************************************************************/
static void
-gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+gm107_grctx_generate_bundle(struct nvc0_grctx *info)
{
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW);
-
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x4064cc, 0x80000000, 0, 0);
- mmio_list(0x418e30, 0x80000000, 0, 0);
-
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000030, 0, 0);
- mmio_list(0x418e24, 0x00000000, 8, 0);
- mmio_list(0x418e28, 0x80000030, 0, 0);
-
- mmio_list(0x4064c8, 0x018002c0, 0, 0);
-
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
- mmio_list(0x419c2c, 0x10000000, 12, 2);
-
- mmio_list(0x405830, 0x0aa01000, 0, 0);
- mmio_list(0x4064c4, 0x0400ffff, 0, 0);
-
- /*XXX*/
- mmio_list(0x5030c0, 0x00001540, 0, 0);
- mmio_list(0x5030f4, 0x00000000, 0, 0);
- mmio_list(0x5030e4, 0x00002000, 0, 0);
- mmio_list(0x5030f8, 0x00003fc0, 0, 0);
- mmio_list(0x418ea0, 0x07151540, 0, 0);
-
- mmio_list(0x5032c0, 0x00001540, 0, 0);
- mmio_list(0x5032f4, 0x00001fe0, 0, 0);
- mmio_list(0x5032e4, 0x00002000, 0, 0);
- mmio_list(0x5032f8, 0x00006fc0, 0, 0);
- mmio_list(0x418ea4, 0x07151540, 0, 0);
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
+ impl->bundle_size / 0x20);
+ const u32 token_limit = impl->bundle_token_limit;
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+ mmio_refn(info, 0x408004, 0x00000000, s, b);
+ mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_refn(info, 0x418e24, 0x00000000, s, b);
+ mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
+}
+
+static void
+gm107_grctx_generate_pagepool(struct nvc0_grctx *info)
+{
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+ mmio_refn(info, 0x40800c, 0x00000000, s, b);
+ mmio_wr32(info, 0x408010, 0x80000000);
+ mmio_refn(info, 0x419004, 0x00000000, s, b);
+ mmio_wr32(info, 0x419008, 0x00000000);
+ mmio_wr32(info, 0x4064cc, 0x80000000);
+ mmio_wr32(info, 0x418e30, 0x80000000); /* guess at it being related */
+}
+
+static void
+gm107_grctx_generate_attrib(struct nvc0_grctx *info)
+{
+ struct nvc0_graph_priv *priv = info->priv;
+ const struct nvc0_grctx_oclass *impl = (void *)nvc0_grctx_impl(priv);
+ const u32 alpha = impl->alpha_nr;
+ const u32 attrib = impl->attrib_nr;
+ const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int max_batches = 0xffff;
+ u32 bo = 0;
+ u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+ int gpc, ppc, n = 0;
+
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+ mmio_wr32(info, 0x405830, (attrib << 16) | alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++, n++) {
+ const u32 as = alpha * priv->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * priv->ppc_tpc_nr[gpc][ppc];
+ const u32 u = 0x418ea0 + (n * 0x04);
+ const u32 o = PPC_UNIT(gpc, ppc, 0);
+ mmio_wr32(info, o + 0xc0, bs);
+ mmio_wr32(info, o + 0xf4, bo);
+ bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, o + 0xe4, as);
+ mmio_wr32(info, o + 0xf8, ao);
+ ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, u, (0x715 /*XXX*/ << 16) | bs);
+ }
+ }
}
static void
@@ -934,7 +963,9 @@ gm107_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_wr32(priv, 0x404154, 0x00000000);
- oclass->mods(priv, info);
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
oclass->unkn(priv);
gm107_grctx_generate_tpcid(priv);
@@ -979,7 +1010,6 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = gm107_grctx_generate_main,
- .mods = gm107_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = gm107_grctx_pack_hub,
.gpc = gm107_grctx_pack_gpc,
@@ -988,4 +1018,15 @@ gm107_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = gm107_grctx_pack_ppc,
.icmd = gm107_grctx_pack_icmd,
.mthd = gm107_grctx_pack_mthd,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x2c0,
+ .pagepool = gm107_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = gm107_grctx_generate_attrib,
+ .attrib_nr_max = 0xff0,
+ .attrib_nr = 0xaa0,
+ .alpha_nr_max = 0x1800,
+ .alpha_nr = 0x1000,
}.base;
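
gm107_grctx_generate_bundle() above no longer hard-codes the 0x4064c8 value: it derives state_limit = min(bundle_min_gpm_fifo_depth, bundle_size / 0x20) and packs (state_limit << 16) | token_limit. A short worked check with the gm107 numbers from this patch, which reproduces the removed mmio_list value 0x018002c0:

    #include <stdio.h>

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    int main(void)
    {
            /* Values taken from the gm107_grctx_oclass initialiser above. */
            const unsigned int bundle_size        = 0x3000;
            const unsigned int min_gpm_fifo_depth = 0x180;
            const unsigned int token_limit        = 0x2c0;

            /* The divisor 0x20 matches the calculation in the patch; reading
             * it as bytes per bundle-state entry is an assumption. */
            unsigned int state_limit = min_u32(min_gpm_fifo_depth,
                                               bundle_size / 0x20);
            unsigned int reg_4064c8  = (state_limit << 16) | token_limit;

            printf("state_limit=%#x 0x4064c8 <- %#010x\n",
                   state_limit, reg_4064c8);   /* 0x180, 0x018002c0 */
            return 0;
    }
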
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
index 8de4a4291548..ce252adbef81 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv108.c
@@ -531,50 +531,6 @@ nv108_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
-nv108_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
-{
- u32 magic[GPC_MAX][2];
- u32 offset;
- int gpc;
-
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x4064cc, 0x80000000, 0, 0);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000030, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000030, 0, 0);
- mmio_list(0x4064c8, 0x00c20200, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
-
- mmio_list(0x405830, 0x02180648, 0, 0);
- mmio_list(0x4064c4, 0x0192ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
- u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
- magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
- magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * priv->tpc_nr[gpc];
- }
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
- offset += 0x07ff * priv->tpc_nr[gpc];
- }
-
- mmio_list(0x17e91c, 0x0b040a0b, 0, 0);
- mmio_list(0x17e920, 0x00090d08, 0, 0);
-}
-
struct nouveau_oclass *
nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x08),
@@ -587,7 +543,6 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nve4_grctx_generate_main,
- .mods = nv108_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nv108_grctx_pack_hub,
.gpc = nv108_grctx_pack_gpc,
@@ -596,4 +551,15 @@ nv108_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nv108_grctx_pack_ppc,
.icmd = nv108_grctx_pack_icmd,
.mthd = nvf0_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0xc2,
+ .bundle_token_limit = 0x200,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x648,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index 833a96508c4e..b8e5fe60a1eb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -982,34 +982,93 @@ nvc0_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
+int
+nvc0_grctx_mmio_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
+{
+ if (info->data) {
+ info->buffer[info->buffer_nr] = round_up(info->addr, align);
+ info->addr = info->buffer[info->buffer_nr] + size;
+ info->data->size = size;
+ info->data->align = align;
+ info->data->access = access;
+ info->data++;
+ return info->buffer_nr++;
+ }
+ return -1;
+}
+
+void
+nvc0_grctx_mmio_item(struct nvc0_grctx *info, u32 addr, u32 data,
+ int shift, int buffer)
+{
+ if (info->data) {
+ if (shift >= 0) {
+ info->mmio->addr = addr;
+ info->mmio->data = data;
+ info->mmio->shift = shift;
+ info->mmio->buffer = buffer;
+ if (buffer >= 0)
+ data |= info->buffer[buffer] >> shift;
+ info->mmio++;
+ } else
+ return;
+ } else {
+ if (buffer >= 0)
+ return;
+ }
+
+ nv_wr32(info->priv, addr, data);
+}
+
+void
+nvc0_grctx_generate_bundle(struct nvc0_grctx *info)
+{
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+ mmio_refn(info, 0x408004, 0x00000000, s, b);
+ mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_refn(info, 0x418808, 0x00000000, s, b);
+ mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b);
+}
+
void
-nvc0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+nvc0_grctx_generate_pagepool(struct nvc0_grctx *info)
{
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+ mmio_refn(info, 0x40800c, 0x00000000, s, b);
+ mmio_wr32(info, 0x408010, 0x80000000);
+ mmio_refn(info, 0x419004, 0x00000000, s, b);
+ mmio_wr32(info, 0x419008, 0x00000000);
+}
+
+void
+nvc0_grctx_generate_attrib(struct nvc0_grctx *info)
+{
+ struct nvc0_graph_priv *priv = info->priv;
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
+ const u32 attrib = impl->attrib_nr;
+ const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
int gpc, tpc;
- u32 offset;
-
- mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
-
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000018, 0, 0);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000018, 0, 0);
-
- mmio_list(0x405830, 0x02180000, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+ u32 bo = 0;
+
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_wr32(info, 0x405830, (attrib << 16));
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- u32 addr = TPC_UNIT(gpc, tpc, 0x0520);
- mmio_list(addr, 0x02180000 | offset, 0, 0);
- offset += 0x0324;
+ const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
+ mmio_skip(info, o, (attrib << 16) | ++bo);
+ mmio_wr32(info, o, (attrib << 16) | --bo);
+ bo += impl->attrib_nr_max;
}
}
}
@@ -1170,7 +1229,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
- nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nvc0_graph_mmio(priv, oclass->hub);
nvc0_graph_mmio(priv, oclass->gpc);
@@ -1180,7 +1239,9 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_wr32(priv, 0x404154, 0x00000000);
- oclass->mods(priv, info);
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
oclass->unkn(priv);
nvc0_grctx_generate_tpcid(priv);
@@ -1192,7 +1253,7 @@ nvc0_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nvc0_graph_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
nvc0_graph_mthd(priv, oclass->mthd);
- nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
}
int
@@ -1308,7 +1369,6 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc0_grctx_generate_mods,
.unkn = nvc0_grctx_generate_unkn,
.hub = nvc0_grctx_pack_hub,
.gpc = nvc0_grctx_pack_gpc,
@@ -1316,4 +1376,11 @@ nvc0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvc0_grctx_pack_tpc,
.icmd = nvc0_grctx_pack_icmd,
.mthd = nvc0_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc0_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
}.base;
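
The two helpers added above replace the old mmio_data()/mmio_list() macros: nvc0_grctx_mmio_data() reserves an aligned region and returns its buffer index, and nvc0_grctx_mmio_item() records a register write, folding the referenced buffer's address (shifted) into the value actually written. A simplified, self-contained sketch of that record-and-patch scheme; the structure and field names here are illustrative, and the real helpers also handle the direct-write path when no list is being recorded:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BUFFERS 4
    #define MAX_MMIO    16

    struct mmio_rec { uint32_t addr, data; int shift, buffer; };

    struct grctx_info {
            uint64_t addr;                  /* running allocation cursor */
            uint64_t buffer[MAX_BUFFERS];   /* start address of each region */
            int buffer_nr;
            struct mmio_rec mmio[MAX_MMIO];
            int mmio_nr;
    };

    static uint64_t round_up_u64(uint64_t v, uint64_t align)
    {
            return (v + align - 1) & ~(align - 1);  /* align: power of two */
    }

    /* Reserve "size" bytes aligned to "align"; return the buffer index. */
    static int mmio_data(struct grctx_info *info, uint32_t size, uint32_t align)
    {
            info->buffer[info->buffer_nr] = round_up_u64(info->addr, align);
            info->addr = info->buffer[info->buffer_nr] + size;
            return info->buffer_nr++;
    }

    /* Record a register write; if it references a buffer, fold that
     * buffer's address (shifted) into the value that gets written. */
    static void mmio_item(struct grctx_info *info, uint32_t addr, uint32_t data,
                          int shift, int buffer)
    {
            struct mmio_rec *r = &info->mmio[info->mmio_nr++];
            r->addr = addr; r->data = data; r->shift = shift; r->buffer = buffer;
            if (buffer >= 0)
                    data |= (uint32_t)(info->buffer[buffer] >> shift);
            printf("wr32 %06x <- %08x\n", addr, data);
    }

    int main(void)
    {
            struct grctx_info info = { .addr = 0x1000 };
            int b = mmio_data(&info, 0x8000, 0x100);        /* e.g. pagepool */
            mmio_item(&info, 0x40800c, 0x00000000, 8, b);   /* base >> 8 */
            mmio_item(&info, 0x408010, 0x80000000, 0, -1);  /* plain write */
            return 0;
    }
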
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
index 8da8b627b9d0..c776cd715e33 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.h
@@ -12,12 +12,19 @@ struct nvc0_grctx {
u64 addr;
};
+int nvc0_grctx_mmio_data(struct nvc0_grctx *, u32 size, u32 align, u32 access);
+void nvc0_grctx_mmio_item(struct nvc0_grctx *, u32 addr, u32 data, int s, int);
+
+#define mmio_vram(a,b,c,d) nvc0_grctx_mmio_data((a), (b), (c), (d))
+#define mmio_refn(a,b,c,d,e) nvc0_grctx_mmio_item((a), (b), (c), (d), (e))
+#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
+#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
+
struct nvc0_grctx_oclass {
struct nouveau_oclass base;
/* main context generation function */
void (*main)(struct nvc0_graph_priv *, struct nvc0_grctx *);
/* context-specific modify-on-first-load list generation function */
- void (*mods)(struct nvc0_graph_priv *, struct nvc0_grctx *);
void (*unkn)(struct nvc0_graph_priv *);
/* mmio context data */
const struct nvc0_graph_pack *hub;
@@ -28,30 +35,34 @@ struct nvc0_grctx_oclass {
/* indirect context data, generated with icmds/mthds */
const struct nvc0_graph_pack *icmd;
const struct nvc0_graph_pack *mthd;
+ /* bundle circular buffer */
+ void (*bundle)(struct nvc0_grctx *);
+ u32 bundle_size;
+ u32 bundle_min_gpm_fifo_depth;
+ u32 bundle_token_limit;
+ /* pagepool */
+ void (*pagepool)(struct nvc0_grctx *);
+ u32 pagepool_size;
+ /* attribute(/alpha) circular buffer */
+ void (*attrib)(struct nvc0_grctx *);
+ u32 attrib_nr_max;
+ u32 attrib_nr;
+ u32 alpha_nr_max;
+ u32 alpha_nr;
};
-#define mmio_data(s,a,p) do { \
- info->buffer[info->buffer_nr] = round_up(info->addr, (a)); \
- info->addr = info->buffer[info->buffer_nr++] + (s); \
- info->data->size = (s); \
- info->data->align = (a); \
- info->data->access = (p); \
- info->data++; \
-} while(0)
-
-#define mmio_list(r,d,s,b) do { \
- info->mmio->addr = (r); \
- info->mmio->data = (d); \
- info->mmio->shift = (s); \
- info->mmio->buffer = (b); \
- info->mmio++; \
- nv_wr32(priv, (r), (d) | ((s) ? (info->buffer[(b)] >> (s)) : 0)); \
-} while(0)
+static inline const struct nvc0_grctx_oclass *
+nvc0_grctx_impl(struct nvc0_graph_priv *priv)
+{
+ return (void *)nv_engine(priv)->cclass;
+}
extern struct nouveau_oclass *nvc0_grctx_oclass;
int nvc0_grctx_generate(struct nvc0_graph_priv *);
void nvc0_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nvc0_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nvc0_grctx_generate_bundle(struct nvc0_grctx *);
+void nvc0_grctx_generate_pagepool(struct nvc0_grctx *);
+void nvc0_grctx_generate_attrib(struct nvc0_grctx *);
void nvc0_grctx_generate_unkn(struct nvc0_graph_priv *);
void nvc0_grctx_generate_tpcid(struct nvc0_graph_priv *);
void nvc0_grctx_generate_r406028(struct nvc0_graph_priv *);
@@ -60,22 +71,27 @@ void nvc0_grctx_generate_r418bb8(struct nvc0_graph_priv *);
void nvc0_grctx_generate_r406800(struct nvc0_graph_priv *);
extern struct nouveau_oclass *nvc1_grctx_oclass;
-void nvc1_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nvc1_grctx_generate_attrib(struct nvc0_grctx *);
void nvc1_grctx_generate_unkn(struct nvc0_graph_priv *);
extern struct nouveau_oclass *nvc4_grctx_oclass;
extern struct nouveau_oclass *nvc8_grctx_oclass;
+
extern struct nouveau_oclass *nvd7_grctx_oclass;
+void nvd7_grctx_generate_attrib(struct nvc0_grctx *);
+
extern struct nouveau_oclass *nvd9_grctx_oclass;
extern struct nouveau_oclass *nve4_grctx_oclass;
extern struct nouveau_oclass *gk20a_grctx_oclass;
void nve4_grctx_generate_main(struct nvc0_graph_priv *, struct nvc0_grctx *);
-void nve4_grctx_generate_mods(struct nvc0_graph_priv *, struct nvc0_grctx *);
+void nve4_grctx_generate_bundle(struct nvc0_grctx *);
+void nve4_grctx_generate_pagepool(struct nvc0_grctx *);
void nve4_grctx_generate_unkn(struct nvc0_graph_priv *);
void nve4_grctx_generate_r418bb8(struct nvc0_graph_priv *);
extern struct nouveau_oclass *nvf0_grctx_oclass;
+extern struct nouveau_oclass *gk110b_grctx_oclass;
extern struct nouveau_oclass *nv108_grctx_oclass;
extern struct nouveau_oclass *gm107_grctx_oclass;
@@ -160,16 +176,23 @@ extern const struct nvc0_graph_pack nve4_grctx_pack_ppc[];
extern const struct nvc0_graph_pack nve4_grctx_pack_icmd[];
extern const struct nvc0_graph_init nve4_grctx_init_a097_0[];
+extern const struct nvc0_graph_pack nvf0_grctx_pack_icmd[];
+
extern const struct nvc0_graph_pack nvf0_grctx_pack_mthd[];
+extern const struct nvc0_graph_pack nvf0_grctx_pack_hub[];
extern const struct nvc0_graph_init nvf0_grctx_init_pri_0[];
extern const struct nvc0_graph_init nvf0_grctx_init_cwd_0[];
+extern const struct nvc0_graph_pack nvf0_grctx_pack_gpc[];
extern const struct nvc0_graph_init nvf0_grctx_init_gpc_unk_2[];
+extern const struct nvc0_graph_init nvf0_grctx_init_tex_0[];
extern const struct nvc0_graph_init nvf0_grctx_init_mpc_0[];
extern const struct nvc0_graph_init nvf0_grctx_init_l1c_0[];
+extern const struct nvc0_graph_pack nvf0_grctx_pack_ppc[];
+
extern const struct nvc0_graph_init nv108_grctx_init_rstr2d_0[];
extern const struct nvc0_graph_init nv108_grctx_init_prop_0[];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
index 24a92c569c0a..c6ba8fed18f1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc1.c
@@ -727,38 +727,38 @@ nvc1_grctx_pack_tpc[] = {
******************************************************************************/
void
-nvc1_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+nvc1_grctx_generate_attrib(struct nvc0_grctx *info)
{
+ struct nvc0_graph_priv *priv = info->priv;
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
+ const u32 alpha = impl->alpha_nr;
+ const u32 beta = impl->attrib_nr;
+ const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int timeslice_mode = 1;
+ const int max_batches = 0xffff;
+ u32 bo = 0;
+ u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
int gpc, tpc;
- u32 offset;
- mmio_data(0x002000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000018, 0, 0);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000018, 0, 0);
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_wr32(info, 0x405830, (beta << 16) | alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
- mmio_list(0x405830, 0x02180218, 0, 0);
- mmio_list(0x4064c4, 0x0086ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- u32 addr = TPC_UNIT(gpc, tpc, 0x0520);
- mmio_list(addr, 0x12180000 | offset, 0, 0);
- offset += 0x0324;
- }
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- u32 addr = TPC_UNIT(gpc, tpc, 0x0544);
- mmio_list(addr, 0x02180000 | offset, 0, 0);
- offset += 0x0324;
+ const u32 a = alpha;
+ const u32 b = beta;
+ const u32 t = timeslice_mode;
+ const u32 o = TPC_UNIT(gpc, tpc, 0x500);
+ mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
+ mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
+ bo += impl->attrib_nr_max;
+ mmio_wr32(info, o + 0x44, (a << 16) | ao);
+ ao += impl->alpha_nr_max;
}
}
}
@@ -786,7 +786,6 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc1_grctx_generate_mods,
.unkn = nvc1_grctx_generate_unkn,
.hub = nvc1_grctx_pack_hub,
.gpc = nvc1_grctx_pack_gpc,
@@ -794,4 +793,13 @@ nvc1_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvc1_grctx_pack_tpc,
.icmd = nvc1_grctx_pack_icmd,
.mthd = nvc1_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc1_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x324,
+ .alpha_nr = 0x218,
}.base;
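
nvc1_grctx_generate_attrib() above carves one allocation into a beta (attrib) region followed by an alpha region: bo starts at 0, ao starts at attrib_nr_max * tpc_total, and each TPC advances its own cursor by the per-TPC maximum. A small sketch of just that offset bookkeeping, using the nvc1 oclass values from this patch and an assumed four-TPC configuration:

    #include <stdio.h>

    int main(void)
    {
            /* Values from the nvc1_grctx_oclass initialiser above. */
            const unsigned attrib_nr_max = 0x324, attrib_nr = 0x218;
            const unsigned alpha_nr_max  = 0x324, alpha_nr  = 0x218;
            const unsigned tpc_total = 4;   /* assumed example configuration */

            unsigned bo = 0;                              /* beta cursor   */
            unsigned ao = attrib_nr_max * tpc_total;      /* alpha follows */
            unsigned size = 0x20 * (attrib_nr_max + alpha_nr_max) * tpc_total;

            printf("attrib/alpha buffer: %#x bytes\n", size);
            for (unsigned tpc = 0; tpc < tpc_total; tpc++) {
                    /* +0x20: timeslice | beta count | beta offset
                     * +0x44: alpha count | alpha offset */
                    printf("tpc%u: +0x20 <- %08x, +0x44 <- %08x\n", tpc,
                           (1u << 28) | (attrib_nr << 16) | bo,
                           (alpha_nr << 16) | ao);
                    bo += attrib_nr_max;
                    ao += alpha_nr_max;
            }
            return 0;
    }
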
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
index e11ed5538193..41705c60cc47 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc4.c
@@ -92,7 +92,6 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc0_grctx_generate_mods,
.unkn = nvc0_grctx_generate_unkn,
.hub = nvc0_grctx_pack_hub,
.gpc = nvc0_grctx_pack_gpc,
@@ -100,4 +99,11 @@ nvc4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvc4_grctx_pack_tpc,
.icmd = nvc0_grctx_pack_icmd,
.mthd = nvc0_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc0_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
index feebd58dfe8d..8f804cd8f9c7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc8.c
@@ -343,7 +343,6 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc0_grctx_generate_mods,
.unkn = nvc0_grctx_generate_unkn,
.hub = nvc0_grctx_pack_hub,
.gpc = nvc8_grctx_pack_gpc,
@@ -351,4 +350,11 @@ nvc8_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvc0_grctx_pack_tpc,
.icmd = nvc8_grctx_pack_icmd,
.mthd = nvc8_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc0_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
index 1dbc8d7f2e86..fcf534fd9e65 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd7.c
@@ -177,44 +177,41 @@ nvd7_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
-nvd7_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+void
+nvd7_grctx_generate_attrib(struct nvc0_grctx *info)
{
- u32 magic[GPC_MAX][2];
- u32 offset;
- int gpc;
-
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000018, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000018, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
+ struct nvc0_graph_priv *priv = info->priv;
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(priv);
+ const u32 alpha = impl->alpha_nr;
+ const u32 beta = impl->attrib_nr;
+ const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size * priv->tpc_total, (1 << s), access);
+ const int timeslice_mode = 1;
+ const int max_batches = 0xffff;
+ u32 bo = 0;
+ u32 ao = bo + impl->attrib_nr_max * priv->tpc_total;
+ int gpc, ppc;
- mmio_list(0x405830, 0x02180324, 0, 0);
- mmio_list(0x4064c4, 0x00c9ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
- u16 magic1 = 0x0324 * priv->tpc_nr[gpc];
- magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
- magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * priv->tpc_nr[gpc];
- }
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_wr32(info, 0x405830, (beta << 16) | alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
- offset += 0x07ff * priv->tpc_nr[gpc];
+ for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++) {
+ const u32 a = alpha * priv->ppc_tpc_nr[gpc][ppc];
+ const u32 b = beta * priv->ppc_tpc_nr[gpc][ppc];
+ const u32 t = timeslice_mode;
+ const u32 o = PPC_UNIT(gpc, ppc, 0);
+ mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
+ mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
+ bo += impl->attrib_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, o + 0xe4, (a << 16) | ao);
+ ao += impl->alpha_nr_max * priv->ppc_tpc_nr[gpc][ppc];
+ }
}
- mmio_list(0x17e91c, 0x03060609, 0, 0); /* different from kepler */
}
void
@@ -223,7 +220,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
int i;
- nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nvc0_graph_mmio(priv, oclass->hub);
nvc0_graph_mmio(priv, oclass->gpc);
@@ -233,7 +230,9 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_wr32(priv, 0x404154, 0x00000000);
- oclass->mods(priv, info);
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
oclass->unkn(priv);
nvc0_grctx_generate_tpcid(priv);
@@ -248,7 +247,7 @@ nvd7_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nvc0_graph_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
nvc0_graph_mthd(priv, oclass->mthd);
- nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
}
struct nouveau_oclass *
@@ -263,7 +262,6 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvd7_grctx_generate_main,
- .mods = nvd7_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nvd7_grctx_pack_hub,
.gpc = nvd7_grctx_pack_gpc,
@@ -272,4 +270,13 @@ nvd7_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nvd7_grctx_pack_ppc,
.icmd = nvd9_grctx_pack_icmd,
.mthd = nvd9_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x324,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
index c665fb7e4660..b9a301b6fd9f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvd9.c
@@ -511,7 +511,6 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nvc0_grctx_generate_main,
- .mods = nvc1_grctx_generate_mods,
.unkn = nvc1_grctx_generate_unkn,
.hub = nvd9_grctx_pack_hub,
.gpc = nvd9_grctx_pack_gpc,
@@ -519,4 +518,13 @@ nvd9_grctx_oclass = &(struct nvc0_grctx_oclass) {
.tpc = nvd9_grctx_pack_tpc,
.icmd = nvd9_grctx_pack_icmd,
.mthd = nvd9_grctx_pack_mthd,
+ .bundle = nvc0_grctx_generate_bundle,
+ .bundle_size = 0x1800,
+ .pagepool = nvc0_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvc1_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x324,
+ .alpha_nr = 0x218,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
index c5b249238587..ccac2ee1a1cb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve4.c
@@ -839,47 +839,34 @@ nve4_grctx_pack_ppc[] = {
******************************************************************************/
void
-nve4_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
+nve4_grctx_generate_bundle(struct nvc0_grctx *info)
{
- u32 magic[GPC_MAX][2];
- u32 offset;
- int gpc;
-
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x4064cc, 0x80000000, 0, 0);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000030, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000030, 0, 0);
- mmio_list(0x4064c8, 0x01800600, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
-
- mmio_list(0x405830, 0x02180648, 0, 0);
- mmio_list(0x4064c4, 0x0192ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
- u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
- magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
- magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * priv->tpc_nr[gpc];
- }
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
- offset += 0x07ff * priv->tpc_nr[gpc];
- }
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
+ impl->bundle_size / 0x20);
+ const u32 token_limit = impl->bundle_token_limit;
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
+ mmio_refn(info, 0x408004, 0x00000000, s, b);
+ mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_refn(info, 0x418808, 0x00000000, s, b);
+ mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b);
+ mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
+}
- mmio_list(0x17e91c, 0x06060609, 0, 0);
- mmio_list(0x17e920, 0x00090a05, 0, 0);
+void
+nve4_grctx_generate_pagepool(struct nvc0_grctx *info)
+{
+ const struct nvc0_grctx_oclass *impl = nvc0_grctx_impl(info->priv);
+ const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
+ const int s = 8;
+ const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
+ mmio_refn(info, 0x40800c, 0x00000000, s, b);
+ mmio_wr32(info, 0x408010, 0x80000000);
+ mmio_refn(info, 0x419004, 0x00000000, s, b);
+ mmio_wr32(info, 0x419008, 0x00000000);
+ mmio_wr32(info, 0x4064cc, 0x80000000);
}
void
@@ -957,7 +944,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
struct nvc0_grctx_oclass *oclass = (void *)nv_engine(priv)->cclass;
int i;
- nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nvc0_graph_mmio(priv, oclass->hub);
nvc0_graph_mmio(priv, oclass->gpc);
@@ -967,7 +954,9 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nv_wr32(priv, 0x404154, 0x00000000);
- oclass->mods(priv, info);
+ oclass->bundle(info);
+ oclass->pagepool(info);
+ oclass->attrib(info);
oclass->unkn(priv);
nvc0_grctx_generate_tpcid(priv);
@@ -991,7 +980,7 @@ nve4_grctx_generate_main(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
nvc0_graph_icmd(priv, oclass->icmd);
nv_wr32(priv, 0x404154, 0x00000400);
nvc0_graph_mthd(priv, oclass->mthd);
- nv_mask(priv, 0x000260, 0x00000001, 0x00000001);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
nv_mask(priv, 0x418800, 0x00200000, 0x00200000);
nv_mask(priv, 0x41be10, 0x00800000, 0x00800000);
@@ -1009,7 +998,6 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nve4_grctx_generate_main,
- .mods = nve4_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nve4_grctx_pack_hub,
.gpc = nve4_grctx_pack_gpc,
@@ -1018,4 +1006,15 @@ nve4_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nve4_grctx_pack_ppc,
.icmd = nve4_grctx_pack_icmd,
.mthd = nve4_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x600,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x648,
}.base;
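
nve4_grctx_generate_bundle() above encodes the bundle buffer size in 256-byte units: with s = 8 and bundle_size = 0x3000, 0x80000000 | (bundle_size >> s) is 0x80000030, the same value the removed mmio_list entries wrote to 0x408008 and 0x41880c, while mmio_refn() supplies the buffer address shifted by the same amount. A one-line check:

    #include <stdio.h>

    int main(void)
    {
            const unsigned bundle_size = 0x3000; /* from nve4_grctx_oclass above */
            const int s = 8;                     /* 256-byte units */
            printf("0x408008 <- %08x\n", 0x80000000u | (bundle_size >> s));
            /* prints 80000030, matching the removed hard-coded value */
            return 0;
    }
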
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
index dec03f04114d..e9b0dcf95a49 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvf0.c
@@ -279,7 +279,7 @@ nvf0_grctx_init_icmd_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nvf0_grctx_pack_icmd[] = {
{ nvf0_grctx_init_icmd_0 },
{}
@@ -668,7 +668,7 @@ nvf0_grctx_init_be_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nvf0_grctx_pack_hub[] = {
{ nvc0_grctx_init_main_0 },
{ nvf0_grctx_init_fe_0 },
@@ -704,7 +704,7 @@ nvf0_grctx_init_gpc_unk_2[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nvf0_grctx_pack_gpc[] = {
{ nvc0_grctx_init_gpc_unk_0 },
{ nvd9_grctx_init_prop_0 },
@@ -718,7 +718,7 @@ nvf0_grctx_pack_gpc[] = {
{}
};
-static const struct nvc0_graph_init
+const struct nvc0_graph_init
nvf0_grctx_init_tex_0[] = {
{ 0x419a00, 1, 0x04, 0x000000f0 },
{ 0x419a04, 1, 0x04, 0x00000001 },
@@ -797,7 +797,7 @@ nvf0_grctx_init_cbm_0[] = {
{}
};
-static const struct nvc0_graph_pack
+const struct nvc0_graph_pack
nvf0_grctx_pack_ppc[] = {
{ nve4_grctx_init_pes_0 },
{ nvf0_grctx_init_cbm_0 },
@@ -809,58 +809,6 @@ nvf0_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
-static void
-nvf0_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
-{
- u32 magic[GPC_MAX][4];
- u32 offset;
- int gpc;
-
- mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS);
- mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW);
- mmio_list(0x40800c, 0x00000000, 8, 1);
- mmio_list(0x408010, 0x80000000, 0, 0);
- mmio_list(0x419004, 0x00000000, 8, 1);
- mmio_list(0x419008, 0x00000000, 0, 0);
- mmio_list(0x4064cc, 0x80000000, 0, 0);
- mmio_list(0x408004, 0x00000000, 8, 0);
- mmio_list(0x408008, 0x80000030, 0, 0);
- mmio_list(0x418808, 0x00000000, 8, 0);
- mmio_list(0x41880c, 0x80000030, 0, 0);
- mmio_list(0x4064c8, 0x01800600, 0, 0);
- mmio_list(0x418810, 0x80000000, 12, 2);
- mmio_list(0x419848, 0x10000000, 12, 2);
-
- mmio_list(0x405830, 0x02180648, 0, 0);
- mmio_list(0x4064c4, 0x0192ffff, 0, 0);
-
- for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * (priv->tpc_nr[gpc] - 1);
- u16 magic1 = 0x0648 * (priv->tpc_nr[gpc] - 1);
- u16 magic2 = 0x0218;
- u16 magic3 = 0x0648;
- magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
- magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * (priv->tpc_nr[gpc] - 1);
- magic[gpc][2] = 0x10000000 | (magic2 << 16) | offset;
- magic[gpc][3] = 0x00000000 | (magic3 << 16);
- offset += 0x0324;
- }
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
- offset += 0x07ff * (priv->tpc_nr[gpc] - 1);
- mmio_list(GPC_UNIT(gpc, 0x32c0), magic[gpc][2], 0, 0);
- mmio_list(GPC_UNIT(gpc, 0x32e4), magic[gpc][3] | offset, 0, 0);
- offset += 0x07ff;
- }
-
- mmio_list(0x17e91c, 0x06060609, 0, 0);
- mmio_list(0x17e920, 0x00090a05, 0, 0);
-}
-
struct nouveau_oclass *
nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xf0),
@@ -873,7 +821,6 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.wr32 = _nouveau_graph_context_wr32,
},
.main = nve4_grctx_generate_main,
- .mods = nvf0_grctx_generate_mods,
.unkn = nve4_grctx_generate_unkn,
.hub = nvf0_grctx_pack_hub,
.gpc = nvf0_grctx_pack_gpc,
@@ -882,4 +829,15 @@ nvf0_grctx_oclass = &(struct nvc0_grctx_oclass) {
.ppc = nvf0_grctx_pack_ppc,
.icmd = nvf0_grctx_pack_icmd,
.mthd = nvf0_grctx_pack_mthd,
+ .bundle = nve4_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x7c0,
+ .pagepool = nve4_grctx_generate_pagepool,
+ .pagepool_size = 0x8000,
+ .attrib = nvd7_grctx_generate_attrib,
+ .attrib_nr_max = 0x324,
+ .attrib_nr = 0x218,
+ .alpha_nr_max = 0x7ff,
+ .alpha_nr = 0x648,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c
new file mode 100644
index 000000000000..d07b19dc168d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk110b.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nvc0.h"
+#include "ctxnvc0.h"
+
+/*******************************************************************************
+ * PGRAPH register lists
+ ******************************************************************************/
+
+static const struct nvc0_graph_init
+gk110b_graph_init_l1c_0[] = {
+ { 0x419c98, 1, 0x04, 0x00000000 },
+ { 0x419ca8, 1, 0x04, 0x00000000 },
+ { 0x419cb0, 1, 0x04, 0x09000000 },
+ { 0x419cb4, 1, 0x04, 0x00000000 },
+ { 0x419cb8, 1, 0x04, 0x00b08bea },
+ { 0x419c84, 1, 0x04, 0x00010384 },
+ { 0x419cbc, 1, 0x04, 0x281b3646 },
+ { 0x419cc0, 2, 0x04, 0x00000000 },
+ { 0x419c80, 1, 0x04, 0x00020230 },
+ { 0x419ccc, 2, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_init
+gk110b_graph_init_sm_0[] = {
+ { 0x419e00, 1, 0x04, 0x00000080 },
+ { 0x419ea0, 1, 0x04, 0x00000000 },
+ { 0x419ee4, 1, 0x04, 0x00000000 },
+ { 0x419ea4, 1, 0x04, 0x00000100 },
+ { 0x419ea8, 1, 0x04, 0x00000000 },
+ { 0x419eb4, 1, 0x04, 0x00000000 },
+ { 0x419ebc, 2, 0x04, 0x00000000 },
+ { 0x419edc, 1, 0x04, 0x00000000 },
+ { 0x419f00, 1, 0x04, 0x00000000 },
+ { 0x419ed0, 1, 0x04, 0x00002616 },
+ { 0x419f74, 1, 0x04, 0x00015555 },
+ { 0x419f80, 4, 0x04, 0x00000000 },
+ {}
+};
+
+static const struct nvc0_graph_pack
+gk110b_graph_pack_mmio[] = {
+ { nve4_graph_init_main_0 },
+ { nvf0_graph_init_fe_0 },
+ { nvc0_graph_init_pri_0 },
+ { nvc0_graph_init_rstr2d_0 },
+ { nvd9_graph_init_pd_0 },
+ { nvf0_graph_init_ds_0 },
+ { nvc0_graph_init_scc_0 },
+ { nvf0_graph_init_sked_0 },
+ { nvf0_graph_init_cwd_0 },
+ { nvd9_graph_init_prop_0 },
+ { nvc1_graph_init_gpc_unk_0 },
+ { nvc0_graph_init_setup_0 },
+ { nvc0_graph_init_crstr_0 },
+ { nvc1_graph_init_setup_1 },
+ { nvc0_graph_init_zcull_0 },
+ { nvd9_graph_init_gpm_0 },
+ { nvf0_graph_init_gpc_unk_1 },
+ { nvc0_graph_init_gcc_0 },
+ { nve4_graph_init_tpccs_0 },
+ { nvf0_graph_init_tex_0 },
+ { nve4_graph_init_pe_0 },
+ { gk110b_graph_init_l1c_0 },
+ { nvc0_graph_init_mpc_0 },
+ { gk110b_graph_init_sm_0 },
+ { nvd7_graph_init_pes_0 },
+ { nvd7_graph_init_wwdx_0 },
+ { nvd7_graph_init_cbm_0 },
+ { nve4_graph_init_be_0 },
+ { nvc0_graph_init_fe_1 },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+struct nouveau_oclass *
+gk110b_graph_oclass = &(struct nvc0_graph_oclass) {
+ .base.handle = NV_ENGINE(GR, 0xf1),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_ctor,
+ .dtor = nvc0_graph_dtor,
+ .init = nve4_graph_init,
+ .fini = nvf0_graph_fini,
+ },
+ .cclass = &gk110b_grctx_oclass,
+ .sclass = nvf0_graph_sclass,
+ .mmio = gk110b_graph_pack_mmio,
+ .fecs.ucode = &nvf0_graph_fecs_ucode,
+ .gpccs.ucode = &nvf0_graph_gpccs_ucode,
+ .ppc_nr = 2,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
index 83048a56430d..7d0abe9f3fe7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gk20a.c
@@ -27,8 +27,8 @@ static struct nouveau_oclass
gk20a_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa040, &nouveau_object_ofuncs },
- { 0xa297, &nouveau_object_ofuncs },
- { 0xa0c0, &nouveau_object_ofuncs },
+ { KEPLER_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -39,9 +39,10 @@ gk20a_graph_oclass = &(struct nvc0_graph_oclass) {
.ctor = nvc0_graph_ctor,
.dtor = nvc0_graph_dtor,
.init = nve4_graph_init,
- .fini = nve4_graph_fini,
+ .fini = _nouveau_graph_fini,
},
.cclass = &gk20a_grctx_oclass,
.sclass = gk20a_graph_sclass,
.mmio = nve4_graph_pack_mmio,
+ .ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
index 21c5f31d607f..4bdbdab2fd9a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/gm107.c
@@ -36,8 +36,8 @@ static struct nouveau_oclass
gm107_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa140, &nouveau_object_ofuncs },
- { 0xb097, &nouveau_object_ofuncs },
- { 0xb0c0, &nouveau_object_ofuncs },
+ { MAXWELL_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { MAXWELL_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -425,6 +425,9 @@ gm107_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400134, 0xffffffff);
nv_wr32(priv, 0x400054, 0x2c350f63);
+
+ nvc0_graph_zbc_init(priv);
+
return nvc0_graph_init_ctxctl(priv);
}
@@ -462,4 +465,5 @@ gm107_graph_oclass = &(struct nvc0_graph_oclass) {
.mmio = gm107_graph_pack_mmio,
.fecs.ucode = 0 ? &gm107_graph_fecs_ucode : NULL,
.gpccs.ucode = &gm107_graph_gpccs_ucode,
+ .ppc_nr = 2,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index ad13dcdd15f9..f70e2f67a4dd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -24,7 +24,6 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/handle.h>
#include <core/namedb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 4532f7e5618c..2b12b09683c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -24,7 +24,6 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/handle.h>
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
index 00ea1a089822..2b0e8f48c029 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv108.c
@@ -33,7 +33,7 @@ static struct nouveau_oclass
nv108_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa140, &nouveau_object_ofuncs },
- { 0xa197, &nouveau_object_ofuncs },
+ { KEPLER_B, &nvc0_fermi_ofuncs },
{ 0xa1c0, &nouveau_object_ofuncs },
{}
};
@@ -220,4 +220,5 @@ nv108_graph_oclass = &(struct nvc0_graph_oclass) {
.mmio = nv108_graph_pack_mmio,
.fecs.ucode = &nv108_graph_fecs_ucode,
.gpccs.ucode = &nv108_graph_gpccs_ucode,
+ .ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index d145e080899a..ceb9c746d94e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -1,6 +1,5 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
index 7a80d005a974..f8a6fdd7d5e8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
index 3e1f32ee43d4..5de9caa2ef67 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
index e451db32e92a..2f9dbc709389 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
index 9385ac7b44a4..34dd26c70b64 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
index 9ce84b73f86a..2fb5756d9f66 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -1,5 +1,4 @@
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/enum.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 6477fbf6a550..4f401174868d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -24,7 +24,6 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/handle.h>
#include <core/engctx.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 20665c21d80e..38e0aa26f1cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/client.h>
#include <core/handle.h>
#include <core/engctx.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index aa0838916354..db19191176fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -26,15 +26,226 @@
#include "ctxnvc0.h"
/*******************************************************************************
+ * Zero Bandwidth Clear
+ ******************************************************************************/
+
+static void
+nvc0_graph_zbc_clear_color(struct nvc0_graph_priv *priv, int zbc)
+{
+ if (priv->zbc_color[zbc].format) {
+ nv_wr32(priv, 0x405804, priv->zbc_color[zbc].ds[0]);
+ nv_wr32(priv, 0x405808, priv->zbc_color[zbc].ds[1]);
+ nv_wr32(priv, 0x40580c, priv->zbc_color[zbc].ds[2]);
+ nv_wr32(priv, 0x405810, priv->zbc_color[zbc].ds[3]);
+ }
+ nv_wr32(priv, 0x405814, priv->zbc_color[zbc].format);
+ nv_wr32(priv, 0x405820, zbc);
+ nv_wr32(priv, 0x405824, 0x00000004); /* TRIGGER | WRITE | COLOR */
+}
+
+static int
+nvc0_graph_zbc_color_get(struct nvc0_graph_priv *priv, int format,
+ const u32 ds[4], const u32 l2[4])
+{
+ struct nouveau_ltc *ltc = nouveau_ltc(priv);
+ int zbc = -ENOSPC, i;
+
+ for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+ if (priv->zbc_color[i].format) {
+ if (priv->zbc_color[i].format != format)
+ continue;
+ if (memcmp(priv->zbc_color[i].ds, ds, sizeof(
+ priv->zbc_color[i].ds)))
+ continue;
+ if (memcmp(priv->zbc_color[i].l2, l2, sizeof(
+ priv->zbc_color[i].l2))) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return i;
+ } else {
+ zbc = (zbc < 0) ? i : zbc;
+ }
+ }
+
+ memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds));
+ memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
+ priv->zbc_color[zbc].format = format;
+ ltc->zbc_color_get(ltc, zbc, l2);
+ nvc0_graph_zbc_clear_color(priv, zbc);
+ return zbc;
+}
+
+static void
+nvc0_graph_zbc_clear_depth(struct nvc0_graph_priv *priv, int zbc)
+{
+ if (priv->zbc_depth[zbc].format)
+ nv_wr32(priv, 0x405818, priv->zbc_depth[zbc].ds);
+ nv_wr32(priv, 0x40581c, priv->zbc_depth[zbc].format);
+ nv_wr32(priv, 0x405820, zbc);
+ nv_wr32(priv, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
+}
+
+static int
+nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format,
+ const u32 ds, const u32 l2)
+{
+ struct nouveau_ltc *ltc = nouveau_ltc(priv);
+ int zbc = -ENOSPC, i;
+
+ for (i = ltc->zbc_min; i <= ltc->zbc_max; i++) {
+ if (priv->zbc_depth[i].format) {
+ if (priv->zbc_depth[i].format != format)
+ continue;
+ if (priv->zbc_depth[i].ds != ds)
+ continue;
+ if (priv->zbc_depth[i].l2 != l2) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return i;
+ } else {
+ zbc = (zbc < 0) ? i : zbc;
+ }
+ }
+
+ priv->zbc_depth[zbc].format = format;
+ priv->zbc_depth[zbc].ds = ds;
+ priv->zbc_depth[zbc].l2 = l2;
+ ltc->zbc_depth_get(ltc, zbc, l2);
+ nvc0_graph_zbc_clear_depth(priv, zbc);
+ return zbc;
+}
+
+/*******************************************************************************
* Graphics object classes
******************************************************************************/
+static int
+nvc0_fermi_mthd_zbc_color(struct nouveau_object *object, void *data, u32 size)
+{
+ struct nvc0_graph_priv *priv = (void *)object->engine;
+ union {
+ struct fermi_a_zbc_color_v0 v0;
+ } *args = data;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ switch (args->v0.format) {
+ case FERMI_A_ZBC_COLOR_V0_FMT_ZERO:
+ case FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32:
+ case FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10:
+ case FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8:
+ case FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10:
+ case FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11:
+ ret = nvc0_graph_zbc_color_get(priv, args->v0.format,
+ args->v0.ds,
+ args->v0.l2);
+ if (ret >= 0) {
+ args->v0.index = ret;
+ return 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvc0_fermi_mthd_zbc_depth(struct nouveau_object *object, void *data, u32 size)
+{
+ struct nvc0_graph_priv *priv = (void *)object->engine;
+ union {
+ struct fermi_a_zbc_depth_v0 v0;
+ } *args = data;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ switch (args->v0.format) {
+ case FERMI_A_ZBC_DEPTH_V0_FMT_FP32:
+ ret = nvc0_graph_zbc_depth_get(priv, args->v0.format,
+ args->v0.ds,
+ args->v0.l2);
+ return (ret >= 0) ? 0 : -ENOSPC;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int
+nvc0_fermi_mthd(struct nouveau_object *object, u32 mthd, void *data, u32 size)
+{
+ switch (mthd) {
+ case FERMI_A_ZBC_COLOR:
+ return nvc0_fermi_mthd_zbc_color(object, data, size);
+ case FERMI_A_ZBC_DEPTH:
+ return nvc0_fermi_mthd_zbc_depth(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+struct nouveau_ofuncs
+nvc0_fermi_ofuncs = {
+ .ctor = _nouveau_object_ctor,
+ .dtor = nouveau_object_destroy,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+ .mthd = nvc0_fermi_mthd,
+};
+
+static int
+nvc0_graph_set_shader_exceptions(struct nouveau_object *object, u32 mthd,
+ void *pdata, u32 size)
+{
+ struct nvc0_graph_priv *priv = (void *)nv_engine(object);
+ if (size >= sizeof(u32)) {
+ u32 data = *(u32 *)pdata ? 0xffffffff : 0x00000000;
+ nv_wr32(priv, 0x419e44, data);
+ nv_wr32(priv, 0x419e4c, data);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+struct nouveau_omthds
+nvc0_graph_9097_omthds[] = {
+ { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions },
+ {}
+};
+
+struct nouveau_omthds
+nvc0_graph_90c0_omthds[] = {
+ { 0x1528, 0x1528, nvc0_graph_set_shader_exceptions },
+ {}
+};
+
struct nouveau_oclass
nvc0_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0x9039, &nouveau_object_ofuncs },
- { 0x9097, &nouveau_object_ofuncs },
- { 0x90c0, &nouveau_object_ofuncs },
+ { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -98,7 +309,7 @@ nvc0_graph_context_ctor(struct nouveau_object *parent,
u32 addr = mmio->addr;
u32 data = mmio->data;
- if (mmio->shift) {
+ if (mmio->buffer >= 0) {
u64 info = chan->data[mmio->buffer].vma.offset;
data |= info >> mmio->shift;
}
@@ -407,6 +618,35 @@ nvc0_graph_pack_mmio[] = {
******************************************************************************/
void
+nvc0_graph_zbc_init(struct nvc0_graph_priv *priv)
+{
+ const u32 zero[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
+ const u32 one[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
+ 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff };
+ const u32 f32_0[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
+ const u32 f32_1[] = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
+ 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };
+ struct nouveau_ltc *ltc = nouveau_ltc(priv);
+ int index;
+
+ if (!priv->zbc_color[0].format) {
+ nvc0_graph_zbc_color_get(priv, 1, & zero[0], &zero[4]);
+ nvc0_graph_zbc_color_get(priv, 2, & one[0], &one[4]);
+ nvc0_graph_zbc_color_get(priv, 4, &f32_0[0], &f32_0[4]);
+ nvc0_graph_zbc_color_get(priv, 4, &f32_1[0], &f32_1[4]);
+ nvc0_graph_zbc_depth_get(priv, 1, 0x00000000, 0x00000000);
+ nvc0_graph_zbc_depth_get(priv, 1, 0x3f800000, 0x3f800000);
+ }
+
+ for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
+ nvc0_graph_zbc_clear_color(priv, index);
+ for (index = ltc->zbc_min; index <= ltc->zbc_max; index++)
+ nvc0_graph_zbc_clear_depth(priv, index);
+}
+
+void
nvc0_graph_mmio(struct nvc0_graph_priv *priv, const struct nvc0_graph_pack *p)
{
const struct nvc0_graph_pack *pack;
@@ -969,17 +1209,16 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
{
struct nvc0_graph_oclass *oclass = (void *)nv_object(priv)->oclass;
struct nvc0_grctx_oclass *cclass = (void *)nv_engine(priv)->cclass;
- u32 r000260;
int i;
if (priv->firmware) {
/* load fuc microcode */
- r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
&priv->fuc409d);
nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
&priv->fuc41ad);
- nv_wr32(priv, 0x000260, r000260);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
/* start both of them running */
nv_wr32(priv, 0x409840, 0xffffffff);
@@ -1066,7 +1305,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
}
/* load HUB microcode */
- r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 0);
nv_wr32(priv, 0x4091c0, 0x01000000);
for (i = 0; i < oclass->fecs.ucode->data.size / 4; i++)
nv_wr32(priv, 0x4091c4, oclass->fecs.ucode->data.data[i]);
@@ -1089,7 +1328,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
nv_wr32(priv, 0x41a188, i >> 6);
nv_wr32(priv, 0x41a184, oclass->gpccs.ucode->code.data[i]);
}
- nv_wr32(priv, 0x000260, r000260);
+ nouveau_mc(priv)->unk260(nouveau_mc(priv), 1);
/* load register lists */
nvc0_graph_init_csdata(priv, cclass->hub, 0x409000, 0x000, 0x000000);
@@ -1224,6 +1463,9 @@ nvc0_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400134, 0xffffffff);
nv_wr32(priv, 0x400054, 0x34ce3464);
+
+ nvc0_graph_zbc_init(priv);
+
return nvc0_graph_init_ctxctl(priv);
}
@@ -1287,7 +1529,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_device *device = nv_device(parent);
struct nvc0_graph_priv *priv;
bool use_ext_fw, enable;
- int ret, i;
+ int ret, i, j;
use_ext_fw = nouveau_boolopt(device->cfgopt, "NvGrUseFW",
oclass->fecs.ucode == NULL);
@@ -1333,6 +1575,11 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
for (i = 0; i < priv->gpc_nr; i++) {
priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
priv->tpc_total += priv->tpc_nr[i];
+ priv->ppc_nr[i] = oclass->ppc_nr;
+ for (j = 0; j < priv->ppc_nr[i]; j++) {
+ u8 mask = nv_rd32(priv, GPC_UNIT(i, 0x0c30 + (j * 4)));
+ priv->ppc_tpc_nr[i][j] = hweight8(mask);
+ }
}
/*XXX: these need figuring out... though it might not even matter */
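A note on the zero-bandwidth-clear table handling introduced in nvc0.c above: nvc0_graph_zbc_color_get() scans the slot range advertised by the LTC subdev, reuses a slot whose format and clear values already match, and otherwise claims the first free slot it passed before programming both the L2 and DS copies of the value. The standalone sketch below models only that allocation policy; the types and the 1..15 slot range are hypothetical stand-ins, not the driver's structures, and it exists purely to make the lookup logic easier to follow.

/* Hypothetical userspace model of the ZBC slot-allocation policy above. */
#include <stdio.h>
#include <string.h>

#define ZBC_MIN 1
#define ZBC_MAX 15

struct zbc_slot {
	unsigned format;              /* 0 means "free", as in the driver */
	unsigned ds[4];               /* clear value as seen by the DS unit */
};

static struct zbc_slot zbc_table[ZBC_MAX + 1];

/* Return the slot index holding (format, ds), claiming the first free slot
 * when no existing entry matches; -1 stands in for the driver's -ENOSPC. */
static int zbc_color_get(unsigned format, const unsigned ds[4])
{
	int free_slot = -1, i;

	for (i = ZBC_MIN; i <= ZBC_MAX; i++) {
		if (zbc_table[i].format) {
			if (zbc_table[i].format == format &&
			    !memcmp(zbc_table[i].ds, ds, sizeof(zbc_table[i].ds)))
				return i;           /* exact match: reuse slot */
		} else if (free_slot < 0) {
			free_slot = i;              /* remember first free slot */
		}
	}

	if (free_slot < 0)
		return -1;                          /* table full */

	memcpy(zbc_table[free_slot].ds, ds, sizeof(zbc_table[free_slot].ds));
	zbc_table[free_slot].format = format;
	return free_slot;
}

int main(void)
{
	const unsigned zero[4] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000 };
	const unsigned one[4]  = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 };

	printf("%d\n", zbc_color_get(1, zero));     /* allocates slot 1 */
	printf("%d\n", zbc_color_get(2, one));      /* allocates slot 2 */
	printf("%d\n", zbc_color_get(1, zero));     /* reuses slot 1 */
	return 0;
}

The depth path in nvc0_graph_zbc_depth_get() follows the same scan, just comparing scalar values instead of four-component ones, and nvc0_graph_zbc_init() then replays every slot into the hardware after seeding the default zero/one entries.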
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index ffc289198dd8..7ed9e89c3435 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -30,10 +30,15 @@
#include <core/gpuobj.h>
#include <core/option.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+
#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>
#include <subdev/timer.h>
+#include <subdev/mc.h>
+#include <subdev/ltc.h>
#include <engine/fifo.h>
#include <engine/graph.h>
@@ -60,7 +65,7 @@ struct nvc0_graph_mmio {
u32 addr;
u32 data;
u32 shift;
- u32 buffer;
+ int buffer;
};
struct nvc0_graph_fuc {
@@ -68,6 +73,18 @@ struct nvc0_graph_fuc {
u32 size;
};
+struct nvc0_graph_zbc_color {
+ u32 format;
+ u32 ds[4];
+ u32 l2[4];
+};
+
+struct nvc0_graph_zbc_depth {
+ u32 format;
+ u32 ds;
+ u32 l2;
+};
+
struct nvc0_graph_priv {
struct nouveau_graph base;
@@ -77,10 +94,15 @@ struct nvc0_graph_priv {
struct nvc0_graph_fuc fuc41ad;
bool firmware;
+ struct nvc0_graph_zbc_color zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT];
+ struct nvc0_graph_zbc_depth zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT];
+
u8 rop_nr;
u8 gpc_nr;
u8 tpc_nr[GPC_MAX];
u8 tpc_total;
+ u8 ppc_nr[GPC_MAX];
+ u8 ppc_tpc_nr[GPC_MAX][4];
struct nouveau_gpuobj *unk4188b4;
struct nouveau_gpuobj *unk4188b8;
@@ -118,12 +140,20 @@ int nvc0_graph_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_object **);
void nvc0_graph_dtor(struct nouveau_object *);
int nvc0_graph_init(struct nouveau_object *);
+void nvc0_graph_zbc_init(struct nvc0_graph_priv *);
+
int nve4_graph_fini(struct nouveau_object *, bool);
int nve4_graph_init(struct nouveau_object *);
-extern struct nouveau_oclass nvc0_graph_sclass[];
+int nvf0_graph_fini(struct nouveau_object *, bool);
+
+extern struct nouveau_ofuncs nvc0_fermi_ofuncs;
+extern struct nouveau_oclass nvc0_graph_sclass[];
+extern struct nouveau_omthds nvc0_graph_9097_omthds[];
+extern struct nouveau_omthds nvc0_graph_90c0_omthds[];
extern struct nouveau_oclass nvc8_graph_sclass[];
+extern struct nouveau_oclass nvf0_graph_sclass[];
struct nvc0_graph_init {
u32 addr;
@@ -149,6 +179,9 @@ struct nvc0_graph_ucode {
extern struct nvc0_graph_ucode nvc0_graph_fecs_ucode;
extern struct nvc0_graph_ucode nvc0_graph_gpccs_ucode;
+extern struct nvc0_graph_ucode nvf0_graph_fecs_ucode;
+extern struct nvc0_graph_ucode nvf0_graph_gpccs_ucode;
+
struct nvc0_graph_oclass {
struct nouveau_oclass base;
struct nouveau_oclass **cclass;
@@ -160,6 +193,7 @@ struct nvc0_graph_oclass {
struct {
struct nvc0_graph_ucode *ucode;
} gpccs;
+ int ppc_nr;
};
void nvc0_graph_mmio(struct nvc0_graph_priv *, const struct nvc0_graph_pack *);
@@ -223,9 +257,11 @@ extern const struct nvc0_graph_init nve4_graph_init_be_0[];
extern const struct nvc0_graph_pack nve4_graph_pack_mmio[];
extern const struct nvc0_graph_init nvf0_graph_init_fe_0[];
+extern const struct nvc0_graph_init nvf0_graph_init_ds_0[];
extern const struct nvc0_graph_init nvf0_graph_init_sked_0[];
extern const struct nvc0_graph_init nvf0_graph_init_cwd_0[];
extern const struct nvc0_graph_init nvf0_graph_init_gpc_unk_1[];
+extern const struct nvc0_graph_init nvf0_graph_init_tex_0[];
extern const struct nvc0_graph_init nvf0_graph_init_sm_0[];
extern const struct nvc0_graph_init nv108_graph_init_gpc_unk_0[];
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
index 30cab0b2eba1..93d58e5b82c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc1.c
@@ -33,9 +33,9 @@ static struct nouveau_oclass
nvc1_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0x9039, &nouveau_object_ofuncs },
- { 0x9097, &nouveau_object_ofuncs },
- { 0x90c0, &nouveau_object_ofuncs },
- { 0x9197, &nouveau_object_ofuncs },
+ { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
index a6bf783e1256..692e1eda0eb4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc8.c
@@ -33,10 +33,10 @@ struct nouveau_oclass
nvc8_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0x9039, &nouveau_object_ofuncs },
- { 0x9097, &nouveau_object_ofuncs },
- { 0x90c0, &nouveau_object_ofuncs },
- { 0x9197, &nouveau_object_ofuncs },
- { 0x9297, &nouveau_object_ofuncs },
+ { FERMI_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_C, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { FERMI_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
index 2a6a94e2a041..41e8445c7eea 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvd7.c
@@ -133,4 +133,5 @@ nvd7_graph_oclass = &(struct nvc0_graph_oclass) {
.mmio = nvd7_graph_pack_mmio,
.fecs.ucode = &nvd7_graph_fecs_ucode,
.gpccs.ucode = &nvd7_graph_gpccs_ucode,
+ .ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
index 51e0c075ad34..0c71f5c67ae0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve4.c
@@ -22,6 +22,8 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include <subdev/pwr.h>
+
#include "nvc0.h"
#include "ctxnvc0.h"
@@ -33,8 +35,8 @@ static struct nouveau_oclass
nve4_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa040, &nouveau_object_ofuncs },
- { 0xa097, &nouveau_object_ofuncs },
- { 0xa0c0, &nouveau_object_ofuncs },
+ { KEPLER_A, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { KEPLER_COMPUTE_A, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -190,39 +192,20 @@ nve4_graph_pack_mmio[] = {
******************************************************************************/
int
-nve4_graph_fini(struct nouveau_object *object, bool suspend)
-{
- struct nvc0_graph_priv *priv = (void *)object;
-
- /*XXX: this is a nasty hack to power on gr on certain boards
- * where it's disabled by therm, somehow. ideally it'd
- * be nice to know when we should be doing this, and why,
- * but, it's yet to be determined. for now we test for
- * the particular mmio error that occurs in the situation,
- * and then bash therm in the way nvidia do.
- */
- nv_mask(priv, 0x000200, 0x08001000, 0x08001000);
- nv_rd32(priv, 0x000200);
- if (nv_rd32(priv, 0x400700) == 0xbadf1000) {
- nv_mask(priv, 0x000200, 0x08001000, 0x00000000);
- nv_rd32(priv, 0x000200);
- nv_mask(priv, 0x020004, 0xc0000000, 0x40000000);
- }
-
- return nouveau_graph_fini(&priv->base, suspend);
-}
-
-int
nve4_graph_init(struct nouveau_object *object)
{
struct nvc0_graph_oclass *oclass = (void *)object->oclass;
struct nvc0_graph_priv *priv = (void *)object;
+ struct nouveau_pwr *ppwr = nouveau_pwr(priv);
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
u32 data[TPC_MAX / 8] = {};
u8 tpcnr[GPC_MAX];
int gpc, tpc, rop;
int ret, i;
+ if (ppwr)
+ ppwr->pgob(ppwr, false);
+
ret = nouveau_graph_init(&priv->base);
if (ret)
return ret;
@@ -320,6 +303,9 @@ nve4_graph_init(struct nouveau_object *object)
nv_wr32(priv, 0x400134, 0xffffffff);
nv_wr32(priv, 0x400054, 0x34ce3464);
+
+ nvc0_graph_zbc_init(priv);
+
return nvc0_graph_init_ctxctl(priv);
}
@@ -350,11 +336,12 @@ nve4_graph_oclass = &(struct nvc0_graph_oclass) {
.ctor = nvc0_graph_ctor,
.dtor = nvc0_graph_dtor,
.init = nve4_graph_init,
- .fini = nve4_graph_fini,
+ .fini = _nouveau_graph_fini,
},
.cclass = &nve4_grctx_oclass,
.sclass = nve4_graph_sclass,
.mmio = nve4_graph_pack_mmio,
.fecs.ucode = &nve4_graph_fecs_ucode,
.gpccs.ucode = &nve4_graph_gpccs_ucode,
+ .ppc_nr = 1,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
index c96762122b9b..c306c0f2fc84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvf0.c
@@ -29,12 +29,12 @@
* Graphics object classes
******************************************************************************/
-static struct nouveau_oclass
+struct nouveau_oclass
nvf0_graph_sclass[] = {
{ 0x902d, &nouveau_object_ofuncs },
{ 0xa140, &nouveau_object_ofuncs },
- { 0xa197, &nouveau_object_ofuncs },
- { 0xa1c0, &nouveau_object_ofuncs },
+ { KEPLER_B, &nvc0_fermi_ofuncs, nvc0_graph_9097_omthds },
+ { KEPLER_COMPUTE_B, &nouveau_object_ofuncs, nvc0_graph_90c0_omthds },
{}
};
@@ -50,7 +50,7 @@ nvf0_graph_init_fe_0[] = {
{}
};
-static const struct nvc0_graph_init
+const struct nvc0_graph_init
nvf0_graph_init_ds_0[] = {
{ 0x405844, 1, 0x04, 0x00ffffff },
{ 0x405850, 1, 0x04, 0x00000000 },
@@ -88,7 +88,7 @@ nvf0_graph_init_gpc_unk_1[] = {
{}
};
-static const struct nvc0_graph_init
+const struct nvc0_graph_init
nvf0_graph_init_tex_0[] = {
{ 0x419ab0, 1, 0x04, 0x00000000 },
{ 0x419ac8, 1, 0x04, 0x00000000 },
@@ -170,7 +170,7 @@ nvf0_graph_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
-static int
+int
nvf0_graph_fini(struct nouveau_object *object, bool suspend)
{
struct nvc0_graph_priv *priv = (void *)object;
@@ -209,7 +209,7 @@ nvf0_graph_fini(struct nouveau_object *object, bool suspend)
#include "fuc/hubnvf0.fuc.h"
-static struct nvc0_graph_ucode
+struct nvc0_graph_ucode
nvf0_graph_fecs_ucode = {
.code.data = nvf0_grhub_code,
.code.size = sizeof(nvf0_grhub_code),
@@ -219,7 +219,7 @@ nvf0_graph_fecs_ucode = {
#include "fuc/gpcnvf0.fuc.h"
-static struct nvc0_graph_ucode
+struct nvc0_graph_ucode
nvf0_graph_gpccs_ucode = {
.code.data = nvf0_grgpc_code,
.code.size = sizeof(nvf0_grgpc_code),
@@ -241,4 +241,5 @@ nvf0_graph_oclass = &(struct nvc0_graph_oclass) {
.mmio = nvf0_graph_pack_mmio,
.fecs.ucode = &nvf0_graph_fecs_ucode,
.gpccs.ucode = &nvf0_graph_gpccs_ucode,
+ .ppc_nr = 2,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 7eb6d94c84e2..d88c700b2f69 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -24,7 +24,6 @@
#include <core/client.h>
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/handle.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index d4e7ec0ba68c..bdb2f20ff7b1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
index 3d8c2133e0e8..72c7f33fd29b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv44.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 37a2bd9e8078..cae33f86b11a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <subdev/vm.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
index 96f5aa92677b..e9cc8b116a24 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <subdev/vm.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
index e9c5e51943ef..63013812f7c9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/perfmon/base.c
@@ -22,8 +22,11 @@
* Authors: Ben Skeggs
*/
+#include <core/client.h>
#include <core/option.h>
-#include <core/class.h>
+#include <nvif/unpack.h>
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
#include <subdev/clock.h>
@@ -101,24 +104,28 @@ nouveau_perfsig_wrap(struct nouveau_perfmon *ppm, const char *name,
* Perfmon object classes
******************************************************************************/
static int
-nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_perfctr_query(struct nouveau_object *object, void *data, u32 size)
{
+ union {
+ struct nvif_perfctr_query_v0 v0;
+ } *args = data;
struct nouveau_device *device = nv_device(object);
struct nouveau_perfmon *ppm = (void *)object->engine;
struct nouveau_perfdom *dom = NULL, *chk;
- struct nv_perfctr_query *args = data;
const bool all = nouveau_boolopt(device->cfgopt, "NvPmShowAll", false);
const bool raw = nouveau_boolopt(device->cfgopt, "NvPmUnnamed", all);
const char *name;
int tmp = 0, di, si;
- char path[64];
-
- if (size < sizeof(*args))
- return -EINVAL;
+ int ret;
- di = (args->iter & 0xff000000) >> 24;
- si = (args->iter & 0x00ffffff) - 1;
+ nv_ioctl(object, "perfctr query size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "perfctr query vers %d iter %08x\n",
+ args->v0.version, args->v0.iter);
+ di = (args->v0.iter & 0xff000000) >> 24;
+ si = (args->v0.iter & 0x00ffffff) - 1;
+ } else
+ return ret;
list_for_each_entry(chk, &ppm->domains, head) {
if (tmp++ == di) {
@@ -132,19 +139,17 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
if (si >= 0) {
if (raw || !(name = dom->signal[si].name)) {
- snprintf(path, sizeof(path), "/%s/%02x", dom->name, si);
- name = path;
+ snprintf(args->v0.name, sizeof(args->v0.name),
+ "/%s/%02x", dom->name, si);
+ } else {
+ strncpy(args->v0.name, name, sizeof(args->v0.name));
}
-
- if (args->name)
- strncpy(args->name, name, args->size);
- args->size = strlen(name) + 1;
}
do {
while (++si < dom->signal_nr) {
if (all || dom->signal[si].name) {
- args->iter = (di << 24) | ++si;
+ args->v0.iter = (di << 24) | ++si;
return 0;
}
}
@@ -153,21 +158,26 @@ nouveau_perfctr_query(struct nouveau_object *object, u32 mthd,
dom = list_entry(dom->head.next, typeof(*dom), head);
} while (&dom->head != &ppm->domains);
- args->iter = 0xffffffff;
+ args->v0.iter = 0xffffffff;
return 0;
}
static int
-nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_perfctr_sample(struct nouveau_object *object, void *data, u32 size)
{
+ union {
+ struct nvif_perfctr_sample none;
+ } *args = data;
struct nouveau_perfmon *ppm = (void *)object->engine;
struct nouveau_perfctr *ctr, *tmp;
struct nouveau_perfdom *dom;
- struct nv_perfctr_sample *args = data;
+ int ret;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(object, "perfctr sample size %d\n", size);
+ if (nvif_unvers(args->none)) {
+ nv_ioctl(object, "perfctr sample\n");
+ } else
+ return ret;
ppm->sequence++;
list_for_each_entry(dom, &ppm->domains, head) {
@@ -206,22 +216,45 @@ nouveau_perfctr_sample(struct nouveau_object *object, u32 mthd,
}
static int
-nouveau_perfctr_read(struct nouveau_object *object, u32 mthd,
- void *data, u32 size)
+nouveau_perfctr_read(struct nouveau_object *object, void *data, u32 size)
{
+ union {
+ struct nvif_perfctr_read_v0 v0;
+ } *args = data;
struct nouveau_perfctr *ctr = (void *)object;
- struct nv_perfctr_read *args = data;
+ int ret;
+
+ nv_ioctl(object, "perfctr read size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(object, "perfctr read vers %d\n", args->v0.version);
+ } else
+ return ret;
- if (size < sizeof(*args))
- return -EINVAL;
if (!ctr->clk)
return -EAGAIN;
- args->clk = ctr->clk;
- args->ctr = ctr->ctr;
+ args->v0.clk = ctr->clk;
+ args->v0.ctr = ctr->ctr;
return 0;
}
+static int
+nouveau_perfctr_mthd(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ switch (mthd) {
+ case NVIF_PERFCTR_V0_QUERY:
+ return nouveau_perfctr_query(object, data, size);
+ case NVIF_PERFCTR_V0_SAMPLE:
+ return nouveau_perfctr_sample(object, data, size);
+ case NVIF_PERFCTR_V0_READ:
+ return nouveau_perfctr_read(object, data, size);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
static void
nouveau_perfctr_dtor(struct nouveau_object *object)
{
@@ -237,19 +270,27 @@ nouveau_perfctr_ctor(struct nouveau_object *parent,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
+ union {
+ struct nvif_perfctr_v0 v0;
+ } *args = data;
struct nouveau_perfmon *ppm = (void *)engine;
struct nouveau_perfdom *dom = NULL;
struct nouveau_perfsig *sig[4] = {};
struct nouveau_perfctr *ctr;
- struct nv_perfctr_class *args = data;
int ret, i;
- if (size < sizeof(*args))
- return -EINVAL;
+ nv_ioctl(parent, "create perfctr size %d\n", size);
+ if (nvif_unpack(args->v0, 0, 0, false)) {
+ nv_ioctl(parent, "create perfctr vers %d logic_op %04x\n",
+ args->v0.version, args->v0.logic_op);
+ } else
+ return ret;
- for (i = 0; i < ARRAY_SIZE(args->signal) && args->signal[i].name; i++) {
- sig[i] = nouveau_perfsig_find(ppm, args->signal[i].name,
- args->signal[i].size, &dom);
+ for (i = 0; i < ARRAY_SIZE(args->v0.name) && args->v0.name[i][0]; i++) {
+ sig[i] = nouveau_perfsig_find(ppm, args->v0.name[i],
+ strnlen(args->v0.name[i],
+ sizeof(args->v0.name[i])),
+ &dom);
if (!sig[i])
return -EINVAL;
}
@@ -260,7 +301,7 @@ nouveau_perfctr_ctor(struct nouveau_object *parent,
return ret;
ctr->slot = -1;
- ctr->logic_op = args->logic_op;
+ ctr->logic_op = args->v0.logic_op;
ctr->signal[0] = sig[0];
ctr->signal[1] = sig[1];
ctr->signal[2] = sig[2];
@@ -276,21 +317,13 @@ nouveau_perfctr_ofuncs = {
.dtor = nouveau_perfctr_dtor,
.init = nouveau_object_init,
.fini = nouveau_object_fini,
-};
-
-static struct nouveau_omthds
-nouveau_perfctr_omthds[] = {
- { NV_PERFCTR_QUERY, NV_PERFCTR_QUERY, nouveau_perfctr_query },
- { NV_PERFCTR_SAMPLE, NV_PERFCTR_SAMPLE, nouveau_perfctr_sample },
- { NV_PERFCTR_READ, NV_PERFCTR_READ, nouveau_perfctr_read },
- {}
+ .mthd = nouveau_perfctr_mthd,
};
struct nouveau_oclass
nouveau_perfmon_sclass[] = {
- { .handle = NV_PERFCTR_CLASS,
+ { .handle = NVIF_IOCTL_NEW_V0_PERFCTR,
.ofuncs = &nouveau_perfctr_ofuncs,
- .omthds = nouveau_perfctr_omthds,
},
{},
};
@@ -303,6 +336,7 @@ nouveau_perfctx_dtor(struct nouveau_object *object)
{
struct nouveau_perfmon *ppm = (void *)object->engine;
mutex_lock(&nv_subdev(ppm)->mutex);
+ nouveau_engctx_destroy(&ppm->context->base);
ppm->context = NULL;
mutex_unlock(&nv_subdev(ppm)->mutex);
}
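The perfmon rework above replaces the old fixed-size method structures with the nvif versioned-argument style: each method receives an opaque (data, size) blob, nvif_unpack() checks that the blob is large enough and that its leading version field falls in the accepted range, and only then are the fields used. The sketch below is a hypothetical userspace model of that check — the struct name, helper, and error value are stand-ins, not the real nvif_unpack macro — shown only to make the control flow of the converted methods clearer.

/* Hypothetical model of versioned-argument unpacking; not the nvif macro. */
#include <stdio.h>
#include <string.h>

/* Versioned argument blob, version field first -- as in the nvif structs. */
struct perfctr_read_v0 {
	unsigned char  version;
	unsigned char  pad01[3];
	unsigned int   ctr;
	unsigned int   clk;
};

/* Accept the blob only if it is big enough and its version is supported. */
static int unpack_v0(const void *data, unsigned size,
		     struct perfctr_read_v0 *out, unsigned vers_max)
{
	if (size < sizeof(*out))
		return -1;                  /* too small: reject, like -EINVAL */
	memcpy(out, data, sizeof(*out));
	if (out->version > vers_max)
		return -1;                  /* unknown future version */
	return 0;
}

int main(void)
{
	struct perfctr_read_v0 blob = { .version = 0, .ctr = 42, .clk = 1000 };
	struct perfctr_read_v0 args;

	if (unpack_v0(&blob, sizeof(blob), &args, 0) == 0)
		printf("vers %u ctr %u clk %u\n", args.version, args.ctr, args.clk);
	return 0;
}

In the converted driver code the same pattern also collapses the per-method omthds table into a single .mthd hook, nouveau_perfctr_mthd(), which dispatches on the NVIF_PERFCTR_V0_* method numbers.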
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index c571758e4a27..64df15c7f051 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index a62f11a78430..f54a2253deca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <engine/software.h>
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index f3b4d9dbf23c..4d2994d8cc32 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -23,12 +23,12 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/namedb.h>
#include <core/handle.h>
#include <core/gpuobj.h>
#include <core/event.h>
+#include <nvif/event.h>
#include <subdev/bar.h>
@@ -86,10 +86,10 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
{
struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
u32 head = *(u32 *)args;
- if (head >= chan->vblank.nr_event)
+ if (head >= nouveau_disp(chan)->vblank.index_nr)
return -EINVAL;
- nouveau_event_get(chan->vblank.event[head]);
+ nvkm_notify_get(&chan->vblank.notify[head]);
return 0;
}
@@ -124,9 +124,10 @@ nv50_software_sclass[] = {
******************************************************************************/
static int
-nv50_software_vblsem_release(void *data, u32 type, int head)
+nv50_software_vblsem_release(struct nvkm_notify *notify)
{
- struct nv50_software_chan *chan = data;
+ struct nv50_software_chan *chan =
+ container_of(notify, typeof(*chan), vblank.notify[notify->index]);
struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
struct nouveau_bar *bar = nouveau_bar(priv);
@@ -142,7 +143,7 @@ nv50_software_vblsem_release(void *data, u32 type, int head)
nv_wr32(priv, 0x060014, chan->vblank.value);
}
- return NVKM_EVENT_DROP;
+ return NVKM_NOTIFY_DROP;
}
void
@@ -151,11 +152,8 @@ nv50_software_context_dtor(struct nouveau_object *object)
struct nv50_software_chan *chan = (void *)object;
int i;
- if (chan->vblank.event) {
- for (i = 0; i < chan->vblank.nr_event; i++)
- nouveau_event_ref(NULL, &chan->vblank.event[i]);
- kfree(chan->vblank.event);
- }
+ for (i = 0; i < ARRAY_SIZE(chan->vblank.notify); i++)
+ nvkm_notify_fini(&chan->vblank.notify[i]);
nouveau_software_context_destroy(&chan->base);
}
@@ -176,15 +174,14 @@ nv50_software_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
- chan->vblank.nr_event = pdisp ? pdisp->vblank->index_nr : 0;
- chan->vblank.event = kzalloc(chan->vblank.nr_event *
- sizeof(*chan->vblank.event), GFP_KERNEL);
- if (!chan->vblank.event)
- return -ENOMEM;
-
- for (i = 0; i < chan->vblank.nr_event; i++) {
- ret = nouveau_event_new(pdisp->vblank, 1, i, pclass->vblank,
- chan, &chan->vblank.event[i]);
+ for (i = 0; pdisp && i < pdisp->vblank.index_nr; i++) {
+ ret = nvkm_notify_init(&pdisp->vblank, pclass->vblank, false,
+ &(struct nvif_notify_head_req_v0) {
+ .head = i,
+ },
+ sizeof(struct nvif_notify_head_req_v0),
+ sizeof(struct nvif_notify_head_rep_v0),
+ &chan->vblank.notify[i]);
if (ret)
return ret;
}
@@ -198,7 +195,7 @@ nv50_software_cclass = {
.base.handle = NV_ENGCTX(SW, 0x50),
.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_software_context_ctor,
- .dtor = _nouveau_software_context_dtor,
+ .dtor = nv50_software_context_dtor,
.init = _nouveau_software_context_init,
.fini = _nouveau_software_context_fini,
},
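The software-engine conversion above drops the dynamically allocated nouveau_eventh array in favour of nvkm_notify objects embedded directly in the channel, so the vblank callback recovers its channel with container_of() instead of a void pointer. The sketch below is a hypothetical, userspace-compilable illustration of that idiom with stand-in struct names, not the driver's types.

/* Hypothetical illustration of recovering the channel from an embedded
 * notifier, mirroring the new nv50_software_vblsem_release(). */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notify {
	int index;                       /* which display head this watches */
	int (*func)(struct notify *);
};

struct chan {
	int channel_id;
	struct notify vblank_notify[4];  /* embedded, one per head */
};

static int vblank_handler(struct notify *notify)
{
	/* notify points at vblank_notify[notify->index]; step back to the
	 * first array element, then to the enclosing channel. */
	struct notify *first = notify - notify->index;
	struct chan *chan = container_of(first, struct chan, vblank_notify);

	printf("vblank on head %d of channel %d\n",
	       notify->index, chan->channel_id);
	return 0;                        /* the driver returns NVKM_NOTIFY_DROP here */
}

int main(void)
{
	struct chan c = { .channel_id = 7 };
	int i;

	for (i = 0; i < 4; i++) {
		c.vblank_notify[i].index = i;
		c.vblank_notify[i].func = vblank_handler;
	}
	c.vblank_notify[2].func(&c.vblank_notify[2]);
	return 0;
}

The driver passes the indexed member straight to container_of(); the sketch steps back to element zero first purely so the macro stays within standard C, the recovered pointer being the same either way.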
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
index bb49a7a20857..41542e725b4b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.h
@@ -19,14 +19,13 @@ int nv50_software_ctor(struct nouveau_object *, struct nouveau_object *,
struct nv50_software_cclass {
struct nouveau_oclass base;
- int (*vblank)(void *, u32, int);
+ int (*vblank)(struct nvkm_notify *);
};
struct nv50_software_chan {
struct nouveau_software_chan base;
struct {
- struct nouveau_eventh **event;
- int nr_event;
+ struct nvkm_notify notify[4];
u32 channel;
u32 ctxdma;
u64 offset;
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index 135c20f38356..6af370d3a06d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -23,7 +23,6 @@
*/
#include <core/os.h>
-#include <core/class.h>
#include <core/engctx.h>
#include <core/event.h>
@@ -104,9 +103,10 @@ nvc0_software_sclass[] = {
******************************************************************************/
static int
-nvc0_software_vblsem_release(void *data, u32 type, int head)
+nvc0_software_vblsem_release(struct nvkm_notify *notify)
{
- struct nv50_software_chan *chan = data;
+ struct nv50_software_chan *chan =
+ container_of(notify, typeof(*chan), vblank.notify[notify->index]);
struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
struct nouveau_bar *bar = nouveau_bar(priv);
@@ -116,7 +116,7 @@ nvc0_software_vblsem_release(void *data, u32 type, int head)
nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
nv_wr32(priv, 0x060014, chan->vblank.value);
- return NVKM_EVENT_DROP;
+ return NVKM_NOTIFY_DROP;
}
static struct nv50_software_cclass
@@ -124,7 +124,7 @@ nvc0_software_cclass = {
.base.handle = NV_ENGCTX(SW, 0xc0),
.base.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv50_software_context_ctor,
- .dtor = _nouveau_software_context_dtor,
+ .dtor = nv50_software_context_dtor,
.init = _nouveau_software_context_init,
.fini = _nouveau_software_context_fini,
},
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
deleted file mode 100644
index e0c812bc884f..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ /dev/null
@@ -1,470 +0,0 @@
-#ifndef __NOUVEAU_CLASS_H__
-#define __NOUVEAU_CLASS_H__
-
-/* Device class
- *
- * 0080: NV_DEVICE
- */
-#define NV_DEVICE_CLASS 0x00000080
-
-#define NV_DEVICE_DISABLE_IDENTIFY 0x0000000000000001ULL
-#define NV_DEVICE_DISABLE_MMIO 0x0000000000000002ULL
-#define NV_DEVICE_DISABLE_VBIOS 0x0000000000000004ULL
-#define NV_DEVICE_DISABLE_CORE 0x0000000000000008ULL
-#define NV_DEVICE_DISABLE_DISP 0x0000000000010000ULL
-#define NV_DEVICE_DISABLE_FIFO 0x0000000000020000ULL
-#define NV_DEVICE_DISABLE_GRAPH 0x0000000100000000ULL
-#define NV_DEVICE_DISABLE_MPEG 0x0000000200000000ULL
-#define NV_DEVICE_DISABLE_ME 0x0000000400000000ULL
-#define NV_DEVICE_DISABLE_VP 0x0000000800000000ULL
-#define NV_DEVICE_DISABLE_CRYPT 0x0000001000000000ULL
-#define NV_DEVICE_DISABLE_BSP 0x0000002000000000ULL
-#define NV_DEVICE_DISABLE_PPP 0x0000004000000000ULL
-#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
-#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
-#define NV_DEVICE_DISABLE_VIC 0x0000020000000000ULL
-#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
-
-struct nv_device_class {
- u64 device; /* device identifier, ~0 for client default */
- u64 disable; /* disable particular subsystems */
- u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
-};
-
-/* DMA object classes
- *
- * 0002: NV_DMA_FROM_MEMORY
- * 0003: NV_DMA_TO_MEMORY
- * 003d: NV_DMA_IN_MEMORY
- */
-#define NV_DMA_FROM_MEMORY_CLASS 0x00000002
-#define NV_DMA_TO_MEMORY_CLASS 0x00000003
-#define NV_DMA_IN_MEMORY_CLASS 0x0000003d
-
-#define NV_DMA_TARGET_MASK 0x000000ff
-#define NV_DMA_TARGET_VM 0x00000000
-#define NV_DMA_TARGET_VRAM 0x00000001
-#define NV_DMA_TARGET_PCI 0x00000002
-#define NV_DMA_TARGET_PCI_US 0x00000003
-#define NV_DMA_TARGET_AGP 0x00000004
-#define NV_DMA_ACCESS_MASK 0x00000f00
-#define NV_DMA_ACCESS_VM 0x00000000
-#define NV_DMA_ACCESS_RD 0x00000100
-#define NV_DMA_ACCESS_WR 0x00000200
-#define NV_DMA_ACCESS_RDWR 0x00000300
-
-/* NV50:NVC0 */
-#define NV50_DMA_CONF0_ENABLE 0x80000000
-#define NV50_DMA_CONF0_PRIV 0x00300000
-#define NV50_DMA_CONF0_PRIV_VM 0x00000000
-#define NV50_DMA_CONF0_PRIV_US 0x00100000
-#define NV50_DMA_CONF0_PRIV__S 0x00200000
-#define NV50_DMA_CONF0_PART 0x00030000
-#define NV50_DMA_CONF0_PART_VM 0x00000000
-#define NV50_DMA_CONF0_PART_256 0x00010000
-#define NV50_DMA_CONF0_PART_1KB 0x00020000
-#define NV50_DMA_CONF0_COMP 0x00000180
-#define NV50_DMA_CONF0_COMP_NONE 0x00000000
-#define NV50_DMA_CONF0_COMP_VM 0x00000180
-#define NV50_DMA_CONF0_TYPE 0x0000007f
-#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
-#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
-
-/* NVC0:NVD9 */
-#define NVC0_DMA_CONF0_ENABLE 0x80000000
-#define NVC0_DMA_CONF0_PRIV 0x00300000
-#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
-#define NVC0_DMA_CONF0_PRIV_US 0x00100000
-#define NVC0_DMA_CONF0_PRIV__S 0x00200000
-#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
-#define NVC0_DMA_CONF0_TYPE 0x000000ff
-#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
-#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
-
-/* NVD9- */
-#define NVD0_DMA_CONF0_ENABLE 0x80000000
-#define NVD0_DMA_CONF0_PAGE 0x00000400
-#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
-#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
-#define NVD0_DMA_CONF0_TYPE 0x000000ff
-#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
-#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
-
-struct nv_dma_class {
- u32 flags;
- u32 pad0;
- u64 start;
- u64 limit;
- u32 conf0;
-};
-
-/* Perfmon counter class
- *
- * XXXX: NV_PERFCTR
- */
-#define NV_PERFCTR_CLASS 0x0000ffff
-#define NV_PERFCTR_QUERY 0x00000000
-#define NV_PERFCTR_SAMPLE 0x00000001
-#define NV_PERFCTR_READ 0x00000002
-
-struct nv_perfctr_class {
- u16 logic_op;
- struct {
- char __user *name; /*XXX: use cfu when exposed to userspace */
- u32 size;
- } signal[4];
-};
-
-struct nv_perfctr_query {
- u32 iter;
- u32 size;
- char __user *name; /*XXX: use ctu when exposed to userspace */
-};
-
-struct nv_perfctr_sample {
-};
-
-struct nv_perfctr_read {
- u32 ctr;
- u32 clk;
-};
-
-/* Device control class
- *
- * XXXX: NV_CONTROL
- */
-#define NV_CONTROL_CLASS 0x0000fffe
-
-#define NV_CONTROL_PSTATE_INFO 0x00000000
-#define NV_CONTROL_PSTATE_INFO_USTATE_DISABLE (-1)
-#define NV_CONTROL_PSTATE_INFO_USTATE_PERFMON (-2)
-#define NV_CONTROL_PSTATE_INFO_PSTATE_UNKNOWN (-1)
-#define NV_CONTROL_PSTATE_INFO_PSTATE_PERFMON (-2)
-#define NV_CONTROL_PSTATE_ATTR 0x00000001
-#define NV_CONTROL_PSTATE_ATTR_STATE_CURRENT (-1)
-#define NV_CONTROL_PSTATE_USER 0x00000002
-#define NV_CONTROL_PSTATE_USER_STATE_UNKNOWN (-1)
-#define NV_CONTROL_PSTATE_USER_STATE_PERFMON (-2)
-
-struct nv_control_pstate_info {
- u32 count; /* out: number of power states */
- s32 ustate; /* out: current target pstate index */
- u32 pstate; /* out: current pstate index */
-};
-
-struct nv_control_pstate_attr {
- s32 state; /* in: index of pstate to query
- * out: pstate identifier
- */
- u32 index; /* in: index of attribute to query
- * out: index of next attribute, or 0 if no more
- */
- char name[32];
- char unit[16];
- u32 min;
- u32 max;
-};
-
-struct nv_control_pstate_user {
- s32 state; /* in: pstate identifier */
-};
-
-/* DMA FIFO channel classes
- *
- * 006b: NV03_CHANNEL_DMA
- * 006e: NV10_CHANNEL_DMA
- * 176e: NV17_CHANNEL_DMA
- * 406e: NV40_CHANNEL_DMA
- * 506e: NV50_CHANNEL_DMA
- * 826e: NV84_CHANNEL_DMA
- */
-#define NV03_CHANNEL_DMA_CLASS 0x0000006b
-#define NV10_CHANNEL_DMA_CLASS 0x0000006e
-#define NV17_CHANNEL_DMA_CLASS 0x0000176e
-#define NV40_CHANNEL_DMA_CLASS 0x0000406e
-#define NV50_CHANNEL_DMA_CLASS 0x0000506e
-#define NV84_CHANNEL_DMA_CLASS 0x0000826e
-
-struct nv03_channel_dma_class {
- u32 pushbuf;
- u32 pad0;
- u64 offset;
-};
-
-/* Indirect FIFO channel classes
- *
- * 506f: NV50_CHANNEL_IND
- * 826f: NV84_CHANNEL_IND
- * 906f: NVC0_CHANNEL_IND
- * a06f: NVE0_CHANNEL_IND
- */
-
-#define NV50_CHANNEL_IND_CLASS 0x0000506f
-#define NV84_CHANNEL_IND_CLASS 0x0000826f
-#define NVC0_CHANNEL_IND_CLASS 0x0000906f
-#define NVE0_CHANNEL_IND_CLASS 0x0000a06f
-
-struct nv50_channel_ind_class {
- u32 pushbuf;
- u32 ilength;
- u64 ioffset;
-};
-
-#define NVE0_CHANNEL_IND_ENGINE_GR 0x00000001
-#define NVE0_CHANNEL_IND_ENGINE_VP 0x00000002
-#define NVE0_CHANNEL_IND_ENGINE_PPP 0x00000004
-#define NVE0_CHANNEL_IND_ENGINE_BSP 0x00000008
-#define NVE0_CHANNEL_IND_ENGINE_CE0 0x00000010
-#define NVE0_CHANNEL_IND_ENGINE_CE1 0x00000020
-#define NVE0_CHANNEL_IND_ENGINE_ENC 0x00000040
-
-struct nve0_channel_ind_class {
- u32 pushbuf;
- u32 ilength;
- u64 ioffset;
- u32 engine;
-};
-
-/* 0046: NV04_DISP
- */
-
-#define NV04_DISP_CLASS 0x00000046
-
-#define NV04_DISP_MTHD 0x00000000
-#define NV04_DISP_MTHD_HEAD 0x00000001
-
-#define NV04_DISP_SCANOUTPOS 0x00000000
-
-struct nv04_display_class {
-};
-
-struct nv04_display_scanoutpos {
- s64 time[2];
- u32 vblanks;
- u32 vblanke;
- u32 vtotal;
- u32 vline;
- u32 hblanks;
- u32 hblanke;
- u32 htotal;
- u32 hline;
-};
-
-/* 5070: NV50_DISP
- * 8270: NV84_DISP
- * 8370: NVA0_DISP
- * 8870: NV94_DISP
- * 8570: NVA3_DISP
- * 9070: NVD0_DISP
- * 9170: NVE0_DISP
- * 9270: NVF0_DISP
- * 9470: GM107_DISP
- */
-
-#define NV50_DISP_CLASS 0x00005070
-#define NV84_DISP_CLASS 0x00008270
-#define NVA0_DISP_CLASS 0x00008370
-#define NV94_DISP_CLASS 0x00008870
-#define NVA3_DISP_CLASS 0x00008570
-#define NVD0_DISP_CLASS 0x00009070
-#define NVE0_DISP_CLASS 0x00009170
-#define NVF0_DISP_CLASS 0x00009270
-#define GM107_DISP_CLASS 0x00009470
-
-#define NV50_DISP_MTHD 0x00000000
-#define NV50_DISP_MTHD_HEAD 0x00000003
-
-#define NV50_DISP_SCANOUTPOS 0x00000000
-
-#define NV50_DISP_SOR_MTHD 0x00010000
-#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
-#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
-#define NV50_DISP_SOR_MTHD_LINK 0x00000004
-#define NV50_DISP_SOR_MTHD_OR 0x00000003
-
-#define NV50_DISP_SOR_PWR 0x00010000
-#define NV50_DISP_SOR_PWR_STATE 0x00000001
-#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001
-#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000
-#define NVA3_DISP_SOR_HDA_ELD 0x00010100
-#define NV84_DISP_SOR_HDMI_PWR 0x00012000
-#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000
-#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000
-#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000
-#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000
-#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
-#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
-#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
-#define NV94_DISP_SOR_DP_PWR 0x00016000
-#define NV94_DISP_SOR_DP_PWR_STATE 0x00000001
-#define NV94_DISP_SOR_DP_PWR_STATE_OFF 0x00000000
-#define NV94_DISP_SOR_DP_PWR_STATE_ON 0x00000001
-
-#define NV50_DISP_DAC_MTHD 0x00020000
-#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
-#define NV50_DISP_DAC_MTHD_OR 0x00000003
-
-#define NV50_DISP_DAC_PWR 0x00020000
-#define NV50_DISP_DAC_PWR_HSYNC 0x00000001
-#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000
-#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001
-#define NV50_DISP_DAC_PWR_VSYNC 0x00000004
-#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000
-#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004
-#define NV50_DISP_DAC_PWR_DATA 0x00000010
-#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000
-#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010
-#define NV50_DISP_DAC_PWR_STATE 0x00000040
-#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
-#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
-#define NV50_DISP_DAC_LOAD 0x00020100
-#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
-
-#define NV50_DISP_PIOR_MTHD 0x00030000
-#define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000
-#define NV50_DISP_PIOR_MTHD_OR 0x00000003
-
-#define NV50_DISP_PIOR_PWR 0x00030000
-#define NV50_DISP_PIOR_PWR_STATE 0x00000001
-#define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001
-#define NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000
-#define NV50_DISP_PIOR_TMDS_PWR 0x00032000
-#define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001
-#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001
-#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000
-#define NV50_DISP_PIOR_DP_PWR 0x00036000
-#define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001
-#define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001
-#define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000
-
-struct nv50_display_class {
-};
-
-/* 507a: NV50_DISP_CURS
- * 827a: NV84_DISP_CURS
- * 837a: NVA0_DISP_CURS
- * 887a: NV94_DISP_CURS
- * 857a: NVA3_DISP_CURS
- * 907a: NVD0_DISP_CURS
- * 917a: NVE0_DISP_CURS
- * 927a: NVF0_DISP_CURS
- * 947a: GM107_DISP_CURS
- */
-
-#define NV50_DISP_CURS_CLASS 0x0000507a
-#define NV84_DISP_CURS_CLASS 0x0000827a
-#define NVA0_DISP_CURS_CLASS 0x0000837a
-#define NV94_DISP_CURS_CLASS 0x0000887a
-#define NVA3_DISP_CURS_CLASS 0x0000857a
-#define NVD0_DISP_CURS_CLASS 0x0000907a
-#define NVE0_DISP_CURS_CLASS 0x0000917a
-#define NVF0_DISP_CURS_CLASS 0x0000927a
-#define GM107_DISP_CURS_CLASS 0x0000947a
-
-struct nv50_display_curs_class {
- u32 head;
-};
-
-/* 507b: NV50_DISP_OIMM
- * 827b: NV84_DISP_OIMM
- * 837b: NVA0_DISP_OIMM
- * 887b: NV94_DISP_OIMM
- * 857b: NVA3_DISP_OIMM
- * 907b: NVD0_DISP_OIMM
- * 917b: NVE0_DISP_OIMM
- * 927b: NVE0_DISP_OIMM
- * 947b: GM107_DISP_OIMM
- */
-
-#define NV50_DISP_OIMM_CLASS 0x0000507b
-#define NV84_DISP_OIMM_CLASS 0x0000827b
-#define NVA0_DISP_OIMM_CLASS 0x0000837b
-#define NV94_DISP_OIMM_CLASS 0x0000887b
-#define NVA3_DISP_OIMM_CLASS 0x0000857b
-#define NVD0_DISP_OIMM_CLASS 0x0000907b
-#define NVE0_DISP_OIMM_CLASS 0x0000917b
-#define NVF0_DISP_OIMM_CLASS 0x0000927b
-#define GM107_DISP_OIMM_CLASS 0x0000947b
-
-struct nv50_display_oimm_class {
- u32 head;
-};
-
-/* 507c: NV50_DISP_SYNC
- * 827c: NV84_DISP_SYNC
- * 837c: NVA0_DISP_SYNC
- * 887c: NV94_DISP_SYNC
- * 857c: NVA3_DISP_SYNC
- * 907c: NVD0_DISP_SYNC
- * 917c: NVE0_DISP_SYNC
- * 927c: NVF0_DISP_SYNC
- * 947c: GM107_DISP_SYNC
- */
-
-#define NV50_DISP_SYNC_CLASS 0x0000507c
-#define NV84_DISP_SYNC_CLASS 0x0000827c
-#define NVA0_DISP_SYNC_CLASS 0x0000837c
-#define NV94_DISP_SYNC_CLASS 0x0000887c
-#define NVA3_DISP_SYNC_CLASS 0x0000857c
-#define NVD0_DISP_SYNC_CLASS 0x0000907c
-#define NVE0_DISP_SYNC_CLASS 0x0000917c
-#define NVF0_DISP_SYNC_CLASS 0x0000927c
-#define GM107_DISP_SYNC_CLASS 0x0000947c
-
-struct nv50_display_sync_class {
- u32 pushbuf;
- u32 head;
-};
-
-/* 507d: NV50_DISP_MAST
- * 827d: NV84_DISP_MAST
- * 837d: NVA0_DISP_MAST
- * 887d: NV94_DISP_MAST
- * 857d: NVA3_DISP_MAST
- * 907d: NVD0_DISP_MAST
- * 917d: NVE0_DISP_MAST
- * 927d: NVF0_DISP_MAST
- * 947d: GM107_DISP_MAST
- */
-
-#define NV50_DISP_MAST_CLASS 0x0000507d
-#define NV84_DISP_MAST_CLASS 0x0000827d
-#define NVA0_DISP_MAST_CLASS 0x0000837d
-#define NV94_DISP_MAST_CLASS 0x0000887d
-#define NVA3_DISP_MAST_CLASS 0x0000857d
-#define NVD0_DISP_MAST_CLASS 0x0000907d
-#define NVE0_DISP_MAST_CLASS 0x0000917d
-#define NVF0_DISP_MAST_CLASS 0x0000927d
-#define GM107_DISP_MAST_CLASS 0x0000947d
-
-struct nv50_display_mast_class {
- u32 pushbuf;
-};
-
-/* 507e: NV50_DISP_OVLY
- * 827e: NV84_DISP_OVLY
- * 837e: NVA0_DISP_OVLY
- * 887e: NV94_DISP_OVLY
- * 857e: NVA3_DISP_OVLY
- * 907e: NVD0_DISP_OVLY
- * 917e: NVE0_DISP_OVLY
- * 927e: NVF0_DISP_OVLY
- * 947e: GM107_DISP_OVLY
- */
-
-#define NV50_DISP_OVLY_CLASS 0x0000507e
-#define NV84_DISP_OVLY_CLASS 0x0000827e
-#define NVA0_DISP_OVLY_CLASS 0x0000837e
-#define NV94_DISP_OVLY_CLASS 0x0000887e
-#define NVA3_DISP_OVLY_CLASS 0x0000857e
-#define NVD0_DISP_OVLY_CLASS 0x0000907e
-#define NVE0_DISP_OVLY_CLASS 0x0000917e
-#define NVF0_DISP_OVLY_CLASS 0x0000927e
-#define GM107_DISP_OVLY_CLASS 0x0000947e
-
-struct nv50_display_ovly_class {
- u32 pushbuf;
- u32 head;
-};
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index c66eac513803..4fc6ab12382d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -10,6 +10,11 @@ struct nouveau_client {
char name[32];
u32 debug;
struct nouveau_vm *vm;
+ bool super;
+ void *data;
+
+ int (*ntfy)(const void *, u32, const void *, u32);
+ struct nvkm_client_notify *notify[8];
};
static inline struct nouveau_client *
@@ -43,4 +48,10 @@ int nouveau_client_init(struct nouveau_client *);
int nouveau_client_fini(struct nouveau_client *, bool suspend);
const char *nouveau_client_name(void *obj);
+int nvkm_client_notify_new(struct nouveau_client *, struct nvkm_event *,
+ void *data, u32 size);
+int nvkm_client_notify_del(struct nouveau_client *, int index);
+int nvkm_client_notify_get(struct nouveau_client *, int index);
+int nvkm_client_notify_put(struct nouveau_client *, int index);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index a8a9a9cf16cb..8743766454a5 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -4,6 +4,7 @@
#include <core/object.h>
#include <core/subdev.h>
#include <core/engine.h>
+#include <core/event.h>
enum nv_subdev_type {
NVDEV_ENGINE_DEVICE,
@@ -28,7 +29,7 @@ enum nv_subdev_type {
NVDEV_SUBDEV_BUS,
NVDEV_SUBDEV_TIMER,
NVDEV_SUBDEV_FB,
- NVDEV_SUBDEV_LTCG,
+ NVDEV_SUBDEV_LTC,
NVDEV_SUBDEV_IBUS,
NVDEV_SUBDEV_INSTMEM,
NVDEV_SUBDEV_VM,
@@ -69,6 +70,8 @@ struct nouveau_device {
struct platform_device *platformdev;
u64 handle;
+ struct nvkm_event event;
+
const char *cfgopt;
const char *dbgopt;
const char *name;
@@ -84,7 +87,6 @@ struct nouveau_device {
NV_40 = 0x40,
NV_50 = 0x50,
NV_C0 = 0xc0,
- NV_D0 = 0xd0,
NV_E0 = 0xe0,
GM100 = 0x110,
} card_type;
@@ -93,8 +95,14 @@ struct nouveau_device {
struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
struct nouveau_object *subdev[NVDEV_SUBDEV_NR];
+
+ struct {
+ struct notifier_block nb;
+ } acpi;
};
+int nouveau_device_list(u64 *name, int size);
+
static inline struct nouveau_device *
nv_device(void *obj)
{
@@ -162,12 +170,6 @@ nv_device_resource_start(struct nouveau_device *device, unsigned int bar);
resource_size_t
nv_device_resource_len(struct nouveau_device *device, unsigned int bar);
-dma_addr_t
-nv_device_map_page(struct nouveau_device *device, struct page *page);
-
-void
-nv_device_unmap_page(struct nouveau_device *device, dma_addr_t addr);
-
int
nv_device_get_irq(struct nouveau_device *device, bool stall);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
index ba3f1a76a815..51e55d03330a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/event.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -1,47 +1,34 @@
#ifndef __NVKM_EVENT_H__
#define __NVKM_EVENT_H__
-/* return codes from event handlers */
-#define NVKM_EVENT_DROP 0
-#define NVKM_EVENT_KEEP 1
+#include <core/notify.h>
-/* nouveau_eventh.flags bit #s */
-#define NVKM_EVENT_ENABLE 0
-
-struct nouveau_eventh {
- struct nouveau_event *event;
- struct list_head head;
- unsigned long flags;
- u32 types;
- int index;
- int (*func)(void *, u32, int);
- void *priv;
+struct nvkm_event_func {
+ int (*ctor)(void *data, u32 size, struct nvkm_notify *);
+ void (*send)(void *data, u32 size, struct nvkm_notify *);
+ void (*init)(struct nvkm_event *, int type, int index);
+ void (*fini)(struct nvkm_event *, int type, int index);
};
-struct nouveau_event {
- void *priv;
- int (*check)(struct nouveau_event *, u32 type, int index);
- void (*enable)(struct nouveau_event *, int type, int index);
- void (*disable)(struct nouveau_event *, int type, int index);
+struct nvkm_event {
+ const struct nvkm_event_func *func;
int types_nr;
int index_nr;
- spinlock_t list_lock;
- struct list_head *list;
spinlock_t refs_lock;
- int refs[];
+ spinlock_t list_lock;
+ struct list_head list;
+ int *refs;
};
-int nouveau_event_create(int types_nr, int index_nr, struct nouveau_event **);
-void nouveau_event_destroy(struct nouveau_event **);
-void nouveau_event_trigger(struct nouveau_event *, u32 types, int index);
-
-int nouveau_event_new(struct nouveau_event *, u32 types, int index,
- int (*func)(void *, u32, int), void *,
- struct nouveau_eventh **);
-void nouveau_event_ref(struct nouveau_eventh *, struct nouveau_eventh **);
-void nouveau_event_get(struct nouveau_eventh *);
-void nouveau_event_put(struct nouveau_eventh *);
+int nvkm_event_init(const struct nvkm_event_func *func,
+ int types_nr, int index_nr,
+ struct nvkm_event *);
+void nvkm_event_fini(struct nvkm_event *);
+void nvkm_event_get(struct nvkm_event *, u32 types, int index);
+void nvkm_event_put(struct nvkm_event *, u32 types, int index);
+void nvkm_event_send(struct nvkm_event *, u32 types, int index,
+ void *data, u32 size);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
index 363674cdf8ab..ceb67d770875 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/handle.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -10,6 +10,9 @@ struct nouveau_handle {
u32 name;
u32 priv;
+ u8 route;
+ u64 token;
+
struct nouveau_handle *parent;
struct nouveau_object *object;
};
@@ -20,6 +23,11 @@ void nouveau_handle_destroy(struct nouveau_handle *);
int nouveau_handle_init(struct nouveau_handle *);
int nouveau_handle_fini(struct nouveau_handle *, bool suspend);
+int nouveau_handle_new(struct nouveau_object *, u32 parent, u32 handle,
+ u16 oclass, void *data, u32 size,
+ struct nouveau_object **);
+int nouveau_handle_del(struct nouveau_object *, u32 parent, u32 handle);
+
struct nouveau_object *
nouveau_handle_ref(struct nouveau_object *, u32 name);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ioctl.h b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h
new file mode 100644
index 000000000000..ac7935c2474e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/ioctl.h
@@ -0,0 +1,6 @@
+#ifndef __NVKM_IOCTL_H__
+#define __NVKM_IOCTL_H__
+
+int nvkm_ioctl(struct nouveau_client *, bool, void *, u32, void **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/notify.h b/drivers/gpu/drm/nouveau/core/include/core/notify.h
new file mode 100644
index 000000000000..1262d8f020f3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/notify.h
@@ -0,0 +1,36 @@
+#ifndef __NVKM_NOTIFY_H__
+#define __NVKM_NOTIFY_H__
+
+struct nvkm_notify {
+ struct nvkm_event *event;
+ struct list_head head;
+#define NVKM_NOTIFY_USER 0
+#define NVKM_NOTIFY_WORK 1
+ unsigned long flags;
+ int block;
+#define NVKM_NOTIFY_DROP 0
+#define NVKM_NOTIFY_KEEP 1
+ int (*func)(struct nvkm_notify *);
+
+ /* set by nvkm_event ctor */
+ u32 types;
+ int index;
+ u32 size;
+
+ struct work_struct work;
+ /* this is const for a *very* good reason - the data might be on the
+ * stack from an irq handler. if you're not core/notify.c then you
+ * should probably think twice before casting it away...
+ */
+ const void *data;
+};
+
+int nvkm_notify_init(struct nvkm_event *, int (*func)(struct nvkm_notify *),
+ bool work, void *data, u32 size, u32 reply,
+ struct nvkm_notify *);
+void nvkm_notify_fini(struct nvkm_notify *);
+void nvkm_notify_get(struct nvkm_notify *);
+void nvkm_notify_put(struct nvkm_notify *);
+void nvkm_notify_send(struct nvkm_notify *, void *data, u32 size);
+
+#endif
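/*
 * Illustrative sketch (not part of this patch): how a provider and a
 * consumer might wire up the nvkm_event/nvkm_notify interface declared
 * above.  The foo_* names are hypothetical; only the nvkm_* calls and the
 * struct layouts come from the headers in this series.
 */
static int
foo_event_ctor(void *data, u32 size, struct nvkm_notify *notify)
{
	/* validate the request; accept only empty requests, index 0 */
	if (size)
		return -EINVAL;
	notify->types = 1;
	notify->index = 0;
	notify->size  = 0;
	return 0;
}

static const struct nvkm_event_func
foo_event_func = {
	.ctor = foo_event_ctor,
};

static int
foo_notify_handler(struct nvkm_notify *notify)
{
	/* called when nvkm_event_send() matches our types/index */
	return NVKM_NOTIFY_KEEP;	/* stay armed; DROP would disarm */
}

static int
foo_demo(void)
{
	struct nvkm_event event;
	struct nvkm_notify notify;
	int ret;

	/* provider side: one notification type, one index */
	ret = nvkm_event_init(&foo_event_func, 1, 1, &event);
	if (ret)
		return ret;

	/* consumer side: register a handler (no workqueue, no request data) */
	ret = nvkm_notify_init(&event, foo_notify_handler, false,
			       NULL, 0, 0, &notify);
	if (ret == 0) {
		nvkm_notify_get(&notify);		/* arm */
		nvkm_event_send(&event, 1, 0, NULL, 0);	/* e.g. from an irq */
		nvkm_notify_fini(&notify);
	}

	nvkm_event_fini(&event);
	return ret;
}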
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 62e68baef087..d7039482d6fd 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -48,6 +48,10 @@ void nouveau_object_destroy(struct nouveau_object *);
int nouveau_object_init(struct nouveau_object *);
int nouveau_object_fini(struct nouveau_object *, bool suspend);
+int _nouveau_object_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
extern struct nouveau_ofuncs nouveau_object_ofuncs;
/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
@@ -78,6 +82,7 @@ struct nouveau_omthds {
int (*call)(struct nouveau_object *, u32, void *, u32);
};
+struct nvkm_event;
struct nouveau_ofuncs {
int (*ctor)(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, void *data, u32 size,
@@ -85,6 +90,9 @@ struct nouveau_ofuncs {
void (*dtor)(struct nouveau_object *);
int (*init)(struct nouveau_object *);
int (*fini)(struct nouveau_object *, bool suspend);
+ int (*mthd)(struct nouveau_object *, u32, void *, u32);
+ int (*ntfy)(struct nouveau_object *, u32, struct nvkm_event **);
+ int (* map)(struct nouveau_object *, u64 *, u32 *);
u8 (*rd08)(struct nouveau_object *, u64 offset);
u16 (*rd16)(struct nouveau_object *, u64 offset);
u32 (*rd32)(struct nouveau_object *, u64 offset);
@@ -106,10 +114,6 @@ void nouveau_object_ref(struct nouveau_object *, struct nouveau_object **);
int nouveau_object_inc(struct nouveau_object *);
int nouveau_object_dec(struct nouveau_object *, bool suspend);
-int nouveau_object_new(struct nouveau_object *, u32 parent, u32 handle,
- u16 oclass, void *data, u32 size,
- struct nouveau_object **);
-int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
void nouveau_object_debug(void);
static inline int
@@ -199,4 +203,21 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
return 0;
}
+#include <core/handle.h>
+
+static inline int
+nouveau_object_new(struct nouveau_object *client, u32 parent, u32 handle,
+ u16 oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ return nouveau_handle_new(client, parent, handle, oclass,
+ data, size, pobject);
+}
+
+static inline int
+nouveau_object_del(struct nouveau_object *client, u32 parent, u32 handle)
+{
+ return nouveau_handle_del(client, parent, handle);
+}
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 9f5ea900ff00..12da418ec70a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -57,5 +57,6 @@ void _nouveau_parent_dtor(struct nouveau_object *);
int nouveau_parent_sclass(struct nouveau_object *, u16 handle,
struct nouveau_object **pengine,
struct nouveau_oclass **poclass);
+int nouveau_parent_lclass(struct nouveau_object *, u32 *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index 0f9a37bd32b0..451b6ed20b7e 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -21,6 +21,7 @@ nv_printk_(struct nouveau_object *, int, const char *, ...);
#define nv_debug(o,f,a...) nv_printk((o), DEBUG, f, ##a)
#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
+#define nv_ioctl(o,f,a...) nv_trace(nouveau_client(o), "ioctl: "f, ##a)
#define nv_assert(f,a...) do { \
if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index fde842896806..7a64f347b385 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -6,20 +6,13 @@
#include <core/device.h>
#include <core/event.h>
-enum nvkm_hpd_event {
- NVKM_HPD_PLUG = 1,
- NVKM_HPD_UNPLUG = 2,
- NVKM_HPD_IRQ = 4,
- NVKM_HPD = (NVKM_HPD_PLUG | NVKM_HPD_UNPLUG | NVKM_HPD_IRQ)
-};
-
struct nouveau_disp {
struct nouveau_engine base;
struct list_head outp;
- struct nouveau_event *hpd;
- struct nouveau_event *vblank;
+ struct nvkm_event hpd;
+ struct nvkm_event vblank;
};
static inline struct nouveau_disp *
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index b28914ed1752..1b283a7b78e6 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,37 +12,20 @@ struct nouveau_dmaobj {
u32 access;
u64 start;
u64 limit;
- u32 conf0;
};
struct nouveau_dmaeng {
struct nouveau_engine base;
/* creates a "physical" dma object from a struct nouveau_dmaobj */
- int (*bind)(struct nouveau_dmaeng *dmaeng,
+ int (*bind)(struct nouveau_dmaobj *dmaobj,
struct nouveau_object *parent,
- struct nouveau_dmaobj *dmaobj,
struct nouveau_gpuobj **);
};
-#define nouveau_dmaeng_create(p,e,c,d) \
- nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d))
-#define nouveau_dmaeng_destroy(p) \
- nouveau_engine_destroy(&(p)->base)
-#define nouveau_dmaeng_init(p) \
- nouveau_engine_init(&(p)->base)
-#define nouveau_dmaeng_fini(p,s) \
- nouveau_engine_fini(&(p)->base, (s))
-
-#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
-#define _nouveau_dmaeng_init _nouveau_engine_init
-#define _nouveau_dmaeng_fini _nouveau_engine_fini
-
-extern struct nouveau_oclass nv04_dmaeng_oclass;
-extern struct nouveau_oclass nv50_dmaeng_oclass;
-extern struct nouveau_oclass nvc0_dmaeng_oclass;
-extern struct nouveau_oclass nvd0_dmaeng_oclass;
-
-extern struct nouveau_oclass nouveau_dmaobj_sclass[];
+extern struct nouveau_oclass *nv04_dmaeng_oclass;
+extern struct nouveau_oclass *nv50_dmaeng_oclass;
+extern struct nouveau_oclass *nvc0_dmaeng_oclass;
+extern struct nouveau_oclass *nvd0_dmaeng_oclass;
#endif
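/*
 * Illustrative sketch (not part of this patch): bind() now takes the dma
 * object itself as its first argument rather than the engine.  The
 * example_* name and the locals are hypothetical.
 */
static int
example_bind_dmaobj(struct nouveau_dmaeng *dmaeng,
		    struct nouveau_dmaobj *dmaobj,
		    struct nouveau_object *parent,
		    struct nouveau_gpuobj **pgpuobj)
{
	/* creates the "physical" dma object for this parent */
	return dmaeng->bind(dmaobj, parent, pgpuobj);
}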
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index b639eb2c74ff..e5e4d930b2c2 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -4,12 +4,14 @@
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engine.h>
+#include <core/event.h>
struct nouveau_fifo_chan {
struct nouveau_namedb base;
struct nouveau_dmaobj *pushdma;
struct nouveau_gpuobj *pushgpu;
void __iomem *user;
+ u64 addr;
u32 size;
u16 chid;
atomic_t refcnt; /* NV04_NVSW_SET_REF */
@@ -40,8 +42,10 @@ void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
void _nouveau_fifo_channel_dtor(struct nouveau_object *);
+int _nouveau_fifo_channel_map(struct nouveau_object *, u64 *, u32 *);
u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
+int _nouveau_fifo_channel_ntfy(struct nouveau_object *, u32, struct nvkm_event **);
struct nouveau_fifo_base {
struct nouveau_gpuobj base;
@@ -65,8 +69,8 @@ struct nouveau_fifo_base {
struct nouveau_fifo {
struct nouveau_engine base;
- struct nouveau_event *cevent; /* channel creation event */
- struct nouveau_event *uevent; /* async user trigger */
+ struct nvkm_event cevent; /* channel creation event */
+ struct nvkm_event uevent; /* async user trigger */
struct nouveau_object **channel;
spinlock_t lock;
@@ -112,6 +116,9 @@ extern struct nouveau_oclass *nve0_fifo_oclass;
extern struct nouveau_oclass *gk20a_fifo_oclass;
extern struct nouveau_oclass *nv108_fifo_oclass;
+int nouveau_fifo_uevent_ctor(void *, u32, struct nvkm_notify *);
+void nouveau_fifo_uevent(struct nouveau_fifo *);
+
void nv04_fifo_intr(struct nouveau_subdev *);
int nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
index 8c1d4772da0c..d5055570d01b 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/graph.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -70,6 +70,7 @@ extern struct nouveau_oclass *nvd9_graph_oclass;
extern struct nouveau_oclass *nve4_graph_oclass;
extern struct nouveau_oclass *gk20a_graph_oclass;
extern struct nouveau_oclass *nvf0_graph_oclass;
+extern struct nouveau_oclass *gk110b_graph_oclass;
extern struct nouveau_oclass *nv108_graph_oclass;
extern struct nouveau_oclass *gm107_graph_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
index 49b0024910fe..88cc812baaa3 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/perfmon.h
@@ -4,7 +4,6 @@
#include <core/device.h>
#include <core/engine.h>
#include <core/engctx.h>
-#include <core/class.h>
struct nouveau_perfdom;
struct nouveau_perfctr;
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/class.h b/drivers/gpu/drm/nouveau/core/include/nvif/class.h
new file mode 120000
index 000000000000..f1ac4859edd4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/class.h
@@ -0,0 +1 @@
+../../../nvif/class.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/event.h b/drivers/gpu/drm/nouveau/core/include/nvif/event.h
new file mode 120000
index 000000000000..1b798538a725
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/event.h
@@ -0,0 +1 @@
+../../../nvif/event.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
new file mode 120000
index 000000000000..8569c86907c5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/ioctl.h
@@ -0,0 +1 @@
+../../../nvif/ioctl.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
new file mode 120000
index 000000000000..69d99292bca4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/nvif/unpack.h
@@ -0,0 +1 @@
+../../../nvif/unpack.h \ No newline at end of file
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
index 9faa98e67ad8..be037fac534c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bar.h
@@ -20,6 +20,9 @@ struct nouveau_bar {
u32 flags, struct nouveau_vma *);
void (*unmap)(struct nouveau_bar *, struct nouveau_vma *);
void (*flush)(struct nouveau_bar *);
+
+ /* whether the BAR supports being ioremapped WC or should be uncached */
+ bool iomap_uncached;
};
static inline struct nouveau_bar *
@@ -30,5 +33,6 @@ nouveau_bar(void *obj)
extern struct nouveau_oclass nv50_bar_oclass;
extern struct nouveau_oclass nvc0_bar_oclass;
+extern struct nouveau_oclass gk20a_bar_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index c01e29c9f89a..a5ca00dd2f61 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -71,8 +71,15 @@ struct nouveau_clock {
struct list_head states;
int state_nr;
+ struct work_struct work;
+ wait_queue_head_t wait;
+ atomic_t waiting;
+
+ struct nvkm_notify pwrsrc_ntfy;
+ int pwrsrc;
int pstate; /* current */
- int ustate; /* user-requested (-1 disabled, -2 perfmon) */
+ int ustate_ac; /* user-requested (-1 disabled, -2 perfmon) */
+ int ustate_dc; /* user-requested (-1 disabled, -2 perfmon) */
int astate; /* perfmon adjustment (base) */
int tstate; /* thermal adjustment (max-) */
int dstate; /* display adjustment (min+) */
@@ -108,8 +115,9 @@ struct nouveau_clocks {
int mdiv;
};
-#define nouveau_clock_create(p,e,o,i,r,d) \
- nouveau_clock_create_((p), (e), (o), (i), (r), sizeof(**d), (void **)d)
+#define nouveau_clock_create(p,e,o,i,r,s,n,d) \
+ nouveau_clock_create_((p), (e), (o), (i), (r), (s), (n), sizeof(**d), \
+ (void **)d)
#define nouveau_clock_destroy(p) ({ \
struct nouveau_clock *clk = (p); \
_nouveau_clock_dtor(nv_object(clk)); \
@@ -118,15 +126,18 @@ struct nouveau_clocks {
struct nouveau_clock *clk = (p); \
_nouveau_clock_init(nv_object(clk)); \
})
-#define nouveau_clock_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
+#define nouveau_clock_fini(p,s) ({ \
+ struct nouveau_clock *clk = (p); \
+ _nouveau_clock_fini(nv_object(clk), (s)); \
+})
int nouveau_clock_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *,
- struct nouveau_clocks *, bool, int, void **);
+ struct nouveau_clocks *, struct nouveau_pstate *,
+ int, bool, int, void **);
void _nouveau_clock_dtor(struct nouveau_object *);
-int _nouveau_clock_init(struct nouveau_object *);
-#define _nouveau_clock_fini _nouveau_subdev_fini
+int _nouveau_clock_init(struct nouveau_object *);
+int _nouveau_clock_fini(struct nouveau_object *, bool);
extern struct nouveau_oclass nv04_clock_oclass;
extern struct nouveau_oclass nv40_clock_oclass;
@@ -136,6 +147,7 @@ extern struct nouveau_oclass *nvaa_clock_oclass;
extern struct nouveau_oclass nva3_clock_oclass;
extern struct nouveau_oclass nvc0_clock_oclass;
extern struct nouveau_oclass nve0_clock_oclass;
+extern struct nouveau_oclass gk20a_clock_oclass;
int nv04_clock_pll_set(struct nouveau_clock *, u32 type, u32 freq);
int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
@@ -145,7 +157,7 @@ int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
int clk, struct nouveau_pll_vals *);
-int nouveau_clock_ustate(struct nouveau_clock *, int req);
+int nouveau_clock_ustate(struct nouveau_clock *, int req, int pwr);
int nouveau_clock_astate(struct nouveau_clock *, int req, int rel);
int nouveau_clock_dstate(struct nouveau_clock *, int req, int rel);
int nouveau_clock_tstate(struct nouveau_clock *, int req, int rel);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index 612d82ab683d..b73733d21cc7 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -8,16 +8,22 @@
#include <subdev/bios.h>
#include <subdev/bios/gpio.h>
-enum nvkm_gpio_event {
- NVKM_GPIO_HI = 1,
- NVKM_GPIO_LO = 2,
- NVKM_GPIO_TOGGLED = (NVKM_GPIO_HI | NVKM_GPIO_LO),
+struct nvkm_gpio_ntfy_req {
+#define NVKM_GPIO_HI 0x01
+#define NVKM_GPIO_LO 0x02
+#define NVKM_GPIO_TOGGLED 0x03
+ u8 mask;
+ u8 line;
+};
+
+struct nvkm_gpio_ntfy_rep {
+ u8 mask;
};
struct nouveau_gpio {
struct nouveau_subdev base;
- struct nouveau_event *events;
+ struct nvkm_event event;
void (*reset)(struct nouveau_gpio *, u8 func);
int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 825f7bb46b67..1b937c2c25ae 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -14,15 +14,18 @@
#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
-enum nvkm_i2c_event {
- NVKM_I2C_PLUG = 1,
- NVKM_I2C_UNPLUG = 2,
- NVKM_I2C_IRQ = 4,
- NVKM_I2C_DONE = 8,
- NVKM_I2C_ANY = (NVKM_I2C_PLUG |
- NVKM_I2C_UNPLUG |
- NVKM_I2C_IRQ |
- NVKM_I2C_DONE),
+struct nvkm_i2c_ntfy_req {
+#define NVKM_I2C_PLUG 0x01
+#define NVKM_I2C_UNPLUG 0x02
+#define NVKM_I2C_IRQ 0x04
+#define NVKM_I2C_DONE 0x08
+#define NVKM_I2C_ANY 0x0f
+ u8 mask;
+ u8 port;
+};
+
+struct nvkm_i2c_ntfy_rep {
+ u8 mask;
};
struct nouveau_i2c_port {
@@ -56,7 +59,7 @@ struct nouveau_i2c_board_info {
struct nouveau_i2c {
struct nouveau_subdev base;
- struct nouveau_event *ntfy;
+ struct nvkm_event event;
struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h
new file mode 100644
index 000000000000..b909a7363f6b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/ltc.h
@@ -0,0 +1,35 @@
+#ifndef __NOUVEAU_LTC_H__
+#define __NOUVEAU_LTC_H__
+
+#include <core/subdev.h>
+#include <core/device.h>
+
+#define NOUVEAU_LTC_MAX_ZBC_CNT 16
+
+struct nouveau_mm_node;
+
+struct nouveau_ltc {
+ struct nouveau_subdev base;
+
+ int (*tags_alloc)(struct nouveau_ltc *, u32 count,
+ struct nouveau_mm_node **);
+ void (*tags_free)(struct nouveau_ltc *, struct nouveau_mm_node **);
+ void (*tags_clear)(struct nouveau_ltc *, u32 first, u32 count);
+
+ int zbc_min;
+ int zbc_max;
+ int (*zbc_color_get)(struct nouveau_ltc *, int index, const u32[4]);
+ int (*zbc_depth_get)(struct nouveau_ltc *, int index, const u32);
+};
+
+static inline struct nouveau_ltc *
+nouveau_ltc(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTC];
+}
+
+extern struct nouveau_oclass *gf100_ltc_oclass;
+extern struct nouveau_oclass *gk104_ltc_oclass;
+extern struct nouveau_oclass *gm107_ltc_oclass;
+
+#endif
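/*
 * Illustrative sketch (not part of this patch): allocating and releasing
 * compression tags through the nouveau_ltc methods declared above.  "obj"
 * is any nouveau object reachable from the device; the example_* name is
 * hypothetical.
 */
static int
example_ltc_tags(void *obj, u32 count, struct nouveau_mm_node **tags)
{
	struct nouveau_ltc *ltc = nouveau_ltc(obj);
	int ret;

	ret = ltc->tags_alloc(ltc, count, tags);
	if (ret)
		return ret;

	/* ... use the tag range; tags_clear() resets a first/count window ... */

	ltc->tags_free(ltc, tags);
	return 0;
}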
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h b/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
deleted file mode 100644
index c9c1950b7743..000000000000
--- a/drivers/gpu/drm/nouveau/core/include/subdev/ltcg.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef __NOUVEAU_LTCG_H__
-#define __NOUVEAU_LTCG_H__
-
-#include <core/subdev.h>
-#include <core/device.h>
-
-struct nouveau_mm_node;
-
-struct nouveau_ltcg {
- struct nouveau_subdev base;
-
- int (*tags_alloc)(struct nouveau_ltcg *, u32 count,
- struct nouveau_mm_node **);
- void (*tags_free)(struct nouveau_ltcg *, struct nouveau_mm_node **);
- void (*tags_clear)(struct nouveau_ltcg *, u32 first, u32 count);
-};
-
-static inline struct nouveau_ltcg *
-nouveau_ltcg(void *obj)
-{
- return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_LTCG];
-}
-
-#define nouveau_ltcg_create(p,e,o,d) \
- nouveau_subdev_create_((p), (e), (o), 0, "PLTCG", "level2", \
- sizeof(**d), (void **)d)
-#define nouveau_ltcg_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_ltcg_init(p) \
- nouveau_subdev_init(&(p)->base)
-#define nouveau_ltcg_fini(p,s) \
- nouveau_subdev_fini(&(p)->base, (s))
-
-#define _nouveau_ltcg_dtor _nouveau_subdev_dtor
-#define _nouveau_ltcg_init _nouveau_subdev_init
-#define _nouveau_ltcg_fini _nouveau_subdev_fini
-
-extern struct nouveau_oclass *gf100_ltcg_oclass;
-extern struct nouveau_oclass *gm107_ltcg_oclass;
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index 72b176831be6..568e4dfc5e9e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -4,15 +4,11 @@
#include <core/subdev.h>
#include <core/device.h>
-struct nouveau_mc_intr {
- u32 stat;
- u32 unit;
-};
-
struct nouveau_mc {
struct nouveau_subdev base;
bool use_msi;
unsigned int irq;
+ void (*unk260)(struct nouveau_mc *, u32);
};
static inline struct nouveau_mc *
@@ -21,30 +17,6 @@ nouveau_mc(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
}
-#define nouveau_mc_create(p,e,o,d) \
- nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_mc_destroy(p) ({ \
- struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
-})
-#define nouveau_mc_init(p) ({ \
- struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
-})
-#define nouveau_mc_fini(p,s) ({ \
- struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
-})
-
-int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-void _nouveau_mc_dtor(struct nouveau_object *);
-int _nouveau_mc_init(struct nouveau_object *);
-int _nouveau_mc_fini(struct nouveau_object *, bool);
-
-struct nouveau_mc_oclass {
- struct nouveau_oclass base;
- const struct nouveau_mc_intr *intr;
- void (*msi_rearm)(struct nouveau_mc *);
-};
-
extern struct nouveau_oclass *nv04_mc_oclass;
extern struct nouveau_oclass *nv40_mc_oclass;
extern struct nouveau_oclass *nv44_mc_oclass;
@@ -54,5 +26,6 @@ extern struct nouveau_oclass *nv94_mc_oclass;
extern struct nouveau_oclass *nv98_mc_oclass;
extern struct nouveau_oclass *nvc0_mc_oclass;
extern struct nouveau_oclass *nvc3_mc_oclass;
+extern struct nouveau_oclass *gk20a_mc_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
index c5c92cbed33f..f73feec151db 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -8,18 +8,6 @@ struct nouveau_pwr {
struct nouveau_subdev base;
struct {
- u32 limit;
- u32 *data;
- u32 size;
- } code;
-
- struct {
- u32 limit;
- u32 *data;
- u32 size;
- } data;
-
- struct {
u32 base;
u32 size;
} send;
@@ -35,7 +23,8 @@ struct nouveau_pwr {
u32 data[2];
} recv;
- int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
+ int (*message)(struct nouveau_pwr *, u32[2], u32, u32, u32, u32);
+ void (*pgob)(struct nouveau_pwr *, bool);
};
static inline struct nouveau_pwr *
@@ -44,29 +33,11 @@ nouveau_pwr(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_PWR];
}
-#define nouveau_pwr_create(p, e, o, d) \
- nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
-#define nouveau_pwr_destroy(p) \
- nouveau_subdev_destroy(&(p)->base)
-#define nouveau_pwr_init(p) ({ \
- struct nouveau_pwr *ppwr = (p); \
- _nouveau_pwr_init(nv_object(ppwr)); \
-})
-#define nouveau_pwr_fini(p,s) ({ \
- struct nouveau_pwr *ppwr = (p); \
- _nouveau_pwr_fini(nv_object(ppwr), (s)); \
-})
-
-int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
-#define _nouveau_pwr_dtor _nouveau_subdev_dtor
-int _nouveau_pwr_init(struct nouveau_object *);
-int _nouveau_pwr_fini(struct nouveau_object *, bool);
-
-extern struct nouveau_oclass nva3_pwr_oclass;
-extern struct nouveau_oclass nvc0_pwr_oclass;
-extern struct nouveau_oclass nvd0_pwr_oclass;
-extern struct nouveau_oclass nv108_pwr_oclass;
+extern struct nouveau_oclass *nva3_pwr_oclass;
+extern struct nouveau_oclass *nvc0_pwr_oclass;
+extern struct nouveau_oclass *nvd0_pwr_oclass;
+extern struct nouveau_oclass *gk104_pwr_oclass;
+extern struct nouveau_oclass *nv108_pwr_oclass;
/* interface to MEMX process running on PPWR */
struct nouveau_memx;
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index d0ced94ca54c..ccfa21d72ddc 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -21,6 +21,8 @@
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
+#include <linux/power_supply.h>
+#include <linux/clk.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c
new file mode 100644
index 000000000000..bf877af9d3bd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/gk20a.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <subdev/bar.h>
+
+#include "priv.h"
+
+int
+gk20a_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_bar *bar;
+ int ret;
+
+ ret = nvc0_bar_ctor(parent, engine, oclass, data, size, pobject);
+ if (ret)
+ return ret;
+
+ bar = (struct nouveau_bar *)*pobject;
+ bar->iomap_uncached = true;
+
+ return 0;
+}
+
+struct nouveau_oclass
+gk20a_bar_oclass = {
+ .handle = NV_SUBDEV(BAR, 0xea),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gk20a_bar_ctor,
+ .dtor = nvc0_bar_dtor,
+ .init = nvc0_bar_init,
+ .fini = _nouveau_bar_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
index ca8139b9ab27..0a44459844e3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/nvc0.c
@@ -133,7 +133,7 @@ nvc0_bar_init_vm(struct nvc0_bar_priv *priv, struct nvc0_bar_priv_vm *bar_vm,
return 0;
}
-static int
+int
nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -169,7 +169,7 @@ nvc0_bar_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return 0;
}
-static void
+void
nvc0_bar_dtor(struct nouveau_object *object)
{
struct nvc0_bar_priv *priv = (void *)object;
@@ -188,7 +188,7 @@ nvc0_bar_dtor(struct nouveau_object *object)
nouveau_bar_destroy(&priv->base);
}
-static int
+int
nvc0_bar_init(struct nouveau_object *object)
{
struct nvc0_bar_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
index ffad8f337ead..3ee8b1476d00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/priv.h
@@ -23,4 +23,10 @@ int nouveau_bar_alloc(struct nouveau_bar *, struct nouveau_object *,
void nv84_bar_flush(struct nouveau_bar *);
+int nvc0_bar_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void nvc0_bar_dtor(struct nouveau_object *);
+int nvc0_bar_init(struct nouveau_object *);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
index 22351f594d2a..a276a711294a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/base.c
@@ -90,16 +90,20 @@ nouveau_cstate_prog(struct nouveau_clock *clk,
cstate = &pstate->base;
}
- ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
- if (ret && ret != -ENODEV) {
- nv_error(clk, "failed to raise fan speed: %d\n", ret);
- return ret;
+ if (ptherm) {
+ ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, +1);
+ if (ret && ret != -ENODEV) {
+ nv_error(clk, "failed to raise fan speed: %d\n", ret);
+ return ret;
+ }
}
- ret = volt->set_id(volt, cstate->voltage, +1);
- if (ret && ret != -ENODEV) {
- nv_error(clk, "failed to raise voltage: %d\n", ret);
- return ret;
+ if (volt) {
+ ret = volt->set_id(volt, cstate->voltage, +1);
+ if (ret && ret != -ENODEV) {
+ nv_error(clk, "failed to raise voltage: %d\n", ret);
+ return ret;
+ }
}
ret = clk->calc(clk, cstate);
@@ -108,13 +112,17 @@ nouveau_cstate_prog(struct nouveau_clock *clk,
clk->tidy(clk);
}
- ret = volt->set_id(volt, cstate->voltage, -1);
- if (ret && ret != -ENODEV)
- nv_error(clk, "failed to lower voltage: %d\n", ret);
+ if (volt) {
+ ret = volt->set_id(volt, cstate->voltage, -1);
+ if (ret && ret != -ENODEV)
+ nv_error(clk, "failed to lower voltage: %d\n", ret);
+ }
- ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
- if (ret && ret != -ENODEV)
- nv_error(clk, "failed to lower fan speed: %d\n", ret);
+ if (ptherm) {
+ ret = nouveau_therm_cstate(ptherm, pstate->fanspeed, -1);
+ if (ret && ret != -ENODEV)
+ nv_error(clk, "failed to lower fan speed: %d\n", ret);
+ }
return 0;
}
@@ -194,16 +202,23 @@ nouveau_pstate_prog(struct nouveau_clock *clk, int pstatei)
return nouveau_cstate_prog(clk, pstate, 0);
}
-static int
-nouveau_pstate_calc(struct nouveau_clock *clk)
+static void
+nouveau_pstate_work(struct work_struct *work)
{
- int pstate, ret = 0;
+ struct nouveau_clock *clk = container_of(work, typeof(*clk), work);
+ int pstate;
- nv_trace(clk, "P %d U %d A %d T %d D %d\n", clk->pstate,
- clk->ustate, clk->astate, clk->tstate, clk->dstate);
+ if (!atomic_xchg(&clk->waiting, 0))
+ return;
+ clk->pwrsrc = power_supply_is_system_supplied();
- if (clk->state_nr && clk->ustate != -1) {
- pstate = (clk->ustate < 0) ? clk->astate : clk->ustate;
+ nv_trace(clk, "P %d PWR %d U(AC) %d U(DC) %d A %d T %d D %d\n",
+ clk->pstate, clk->pwrsrc, clk->ustate_ac, clk->ustate_dc,
+ clk->astate, clk->tstate, clk->dstate);
+
+ pstate = clk->pwrsrc ? clk->ustate_ac : clk->ustate_dc;
+ if (clk->state_nr && pstate != -1) {
+ pstate = (pstate < 0) ? clk->astate : pstate;
pstate = min(pstate, clk->state_nr - 1 - clk->tstate);
pstate = max(pstate, clk->dstate);
} else {
@@ -211,9 +226,26 @@ nouveau_pstate_calc(struct nouveau_clock *clk)
}
nv_trace(clk, "-> %d\n", pstate);
- if (pstate != clk->pstate)
- ret = nouveau_pstate_prog(clk, pstate);
- return ret;
+ if (pstate != clk->pstate) {
+ int ret = nouveau_pstate_prog(clk, pstate);
+ if (ret) {
+ nv_error(clk, "error setting pstate %d: %d\n",
+ pstate, ret);
+ }
+ }
+
+ wake_up_all(&clk->wait);
+ nvkm_notify_get(&clk->pwrsrc_ntfy);
+}
+
+static int
+nouveau_pstate_calc(struct nouveau_clock *clk, bool wait)
+{
+ atomic_set(&clk->waiting, 1);
+ schedule_work(&clk->work);
+ if (wait)
+ wait_event(clk->wait, !atomic_read(&clk->waiting));
+ return 0;
}
static void
@@ -361,17 +393,40 @@ nouveau_clock_ustate_update(struct nouveau_clock *clk, int req)
req = i;
}
- clk->ustate = req;
- return 0;
+ return req + 2;
+}
+
+static int
+nouveau_clock_nstate(struct nouveau_clock *clk, const char *mode, int arglen)
+{
+ int ret = 1;
+
+ if (strncasecmpz(mode, "disabled", arglen)) {
+ char save = mode[arglen];
+ long v;
+
+ ((char *)mode)[arglen] = '\0';
+ if (!kstrtol(mode, 0, &v)) {
+ ret = nouveau_clock_ustate_update(clk, v);
+ if (ret < 0)
+ ret = 1;
+ }
+ ((char *)mode)[arglen] = save;
+ }
+
+ return ret - 2;
}
int
-nouveau_clock_ustate(struct nouveau_clock *clk, int req)
+nouveau_clock_ustate(struct nouveau_clock *clk, int req, int pwr)
{
int ret = nouveau_clock_ustate_update(clk, req);
- if (ret)
- return ret;
- return nouveau_pstate_calc(clk);
+ if (ret >= 0) {
+ if (ret -= 2, pwr) clk->ustate_ac = ret;
+ else clk->ustate_dc = ret;
+ return nouveau_pstate_calc(clk, true);
+ }
+ return ret;
}
int
@@ -381,7 +436,7 @@ nouveau_clock_astate(struct nouveau_clock *clk, int req, int rel)
if ( rel) clk->astate += rel;
clk->astate = min(clk->astate, clk->state_nr - 1);
clk->astate = max(clk->astate, 0);
- return nouveau_pstate_calc(clk);
+ return nouveau_pstate_calc(clk, true);
}
int
@@ -391,7 +446,7 @@ nouveau_clock_tstate(struct nouveau_clock *clk, int req, int rel)
if ( rel) clk->tstate += rel;
clk->tstate = min(clk->tstate, 0);
clk->tstate = max(clk->tstate, -(clk->state_nr - 1));
- return nouveau_pstate_calc(clk);
+ return nouveau_pstate_calc(clk, true);
}
int
@@ -401,12 +456,30 @@ nouveau_clock_dstate(struct nouveau_clock *clk, int req, int rel)
if ( rel) clk->dstate += rel;
clk->dstate = min(clk->dstate, clk->state_nr - 1);
clk->dstate = max(clk->dstate, 0);
- return nouveau_pstate_calc(clk);
+ return nouveau_pstate_calc(clk, true);
+}
+
+static int
+nouveau_clock_pwrsrc(struct nvkm_notify *notify)
+{
+ struct nouveau_clock *clk =
+ container_of(notify, typeof(*clk), pwrsrc_ntfy);
+ nouveau_pstate_calc(clk, false);
+ return NVKM_NOTIFY_DROP;
}
/******************************************************************************
* subdev base class implementation
*****************************************************************************/
+
+int
+_nouveau_clock_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nouveau_clock *clk = (void *)object;
+ nvkm_notify_put(&clk->pwrsrc_ntfy);
+ return nouveau_subdev_fini(&clk->base, suspend);
+}
+
int
_nouveau_clock_init(struct nouveau_object *object)
{
@@ -414,6 +487,10 @@ _nouveau_clock_init(struct nouveau_object *object)
struct nouveau_clocks *clock = clk->domains;
int ret;
+ ret = nouveau_subdev_init(&clk->base);
+ if (ret)
+ return ret;
+
memset(&clk->bstate, 0x00, sizeof(clk->bstate));
INIT_LIST_HEAD(&clk->bstate.list);
clk->bstate.pstate = 0xff;
@@ -434,7 +511,7 @@ _nouveau_clock_init(struct nouveau_object *object)
clk->tstate = 0;
clk->dstate = 0;
clk->pstate = -1;
- nouveau_pstate_calc(clk);
+ nouveau_pstate_calc(clk, true);
return 0;
}
@@ -444,6 +521,8 @@ _nouveau_clock_dtor(struct nouveau_object *object)
struct nouveau_clock *clk = (void *)object;
struct nouveau_pstate *pstate, *temp;
+ nvkm_notify_fini(&clk->pwrsrc_ntfy);
+
list_for_each_entry_safe(pstate, temp, &clk->states, head) {
nouveau_pstate_del(pstate);
}
@@ -456,6 +535,7 @@ nouveau_clock_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass,
struct nouveau_clocks *clocks,
+ struct nouveau_pstate *pstates, int nb_pstates,
bool allow_reclock,
int length, void **object)
{
@@ -472,29 +552,46 @@ nouveau_clock_create_(struct nouveau_object *parent,
INIT_LIST_HEAD(&clk->states);
clk->domains = clocks;
- clk->ustate = -1;
+ clk->ustate_ac = -1;
+ clk->ustate_dc = -1;
+
+ INIT_WORK(&clk->work, nouveau_pstate_work);
+ init_waitqueue_head(&clk->wait);
+ atomic_set(&clk->waiting, 0);
- idx = 0;
- do {
- ret = nouveau_pstate_new(clk, idx++);
- } while (ret == 0);
+ /* If no pstates are provided, try to fetch them from the BIOS */
+ if (!pstates) {
+ idx = 0;
+ do {
+ ret = nouveau_pstate_new(clk, idx++);
+ } while (ret == 0);
+ } else {
+ for (idx = 0; idx < nb_pstates; idx++)
+ list_add_tail(&pstates[idx].head, &clk->states);
+ clk->state_nr = nb_pstates;
+ }
clk->allow_reclock = allow_reclock;
+ ret = nvkm_notify_init(&device->event, nouveau_clock_pwrsrc, true,
+ NULL, 0, 0, &clk->pwrsrc_ntfy);
+ if (ret)
+ return ret;
+
mode = nouveau_stropt(device->cfgopt, "NvClkMode", &arglen);
if (mode) {
- if (!strncasecmpz(mode, "disabled", arglen)) {
- clk->ustate = -1;
- } else {
- char save = mode[arglen];
- long v;
-
- ((char *)mode)[arglen] = '\0';
- if (!kstrtol(mode, 0, &v))
- nouveau_clock_ustate_update(clk, v);
- ((char *)mode)[arglen] = save;
- }
+ clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen);
+ clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen);
}
+ mode = nouveau_stropt(device->cfgopt, "NvClkModeAC", &arglen);
+ if (mode)
+ clk->ustate_ac = nouveau_clock_nstate(clk, mode, arglen);
+
+ mode = nouveau_stropt(device->cfgopt, "NvClkModeDC", &arglen);
+ if (mode)
+ clk->ustate_dc = nouveau_clock_nstate(clk, mode, arglen);
+
+
return 0;
}
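/*
 * Illustrative sketch (not part of this patch): with the user clock state
 * split into AC and DC variants, callers now say which power source a
 * request applies to.  "clk", "ac_req" and "dc_req" are hypothetical.
 */
static int
example_set_ustates(struct nouveau_clock *clk, int ac_req, int dc_req)
{
	int ret;

	/* pwr != 0 selects the AC (mains) state, pwr == 0 the DC (battery) one */
	ret = nouveau_clock_ustate(clk, ac_req, 1);
	if (ret)
		return ret;
	return nouveau_clock_ustate(clk, dc_req, 0);
}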
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
new file mode 100644
index 000000000000..425a8d5e9129
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Shamelessly ripped off from ChromeOS's gk20a/clk_pllg.c
+ *
+ */
+
+#define MHZ (1000 * 1000)
+
+#define MASK(w) ((1 << w) - 1)
+
+#define SYS_GPCPLL_CFG_BASE 0x00137000
+#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
+
+#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
+#define GPCPLL_CFG_ENABLE BIT(0)
+#define GPCPLL_CFG_IDDQ BIT(1)
+#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
+#define GPCPLL_CFG_LOCK BIT(17)
+
+#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
+#define GPCPLL_COEFF_M_SHIFT 0
+#define GPCPLL_COEFF_M_WIDTH 8
+#define GPCPLL_COEFF_N_SHIFT 8
+#define GPCPLL_COEFF_N_WIDTH 8
+#define GPCPLL_COEFF_P_SHIFT 16
+#define GPCPLL_COEFF_P_WIDTH 6
+
+#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
+#define GPCPLL_CFG2_SETUP2_SHIFT 16
+#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
+
+#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
+#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
+
+#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
+#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
+#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
+#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
+#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
+
+#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
+#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
+
+#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
+#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
+#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
+#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
+#define GPC2CLK_OUT_VCODIV_WIDTH 6
+#define GPC2CLK_OUT_VCODIV_SHIFT 8
+#define GPC2CLK_OUT_VCODIV1 0
+#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
+ GPC2CLK_OUT_VCODIV_SHIFT)
+#define GPC2CLK_OUT_BYPDIV_WIDTH 6
+#define GPC2CLK_OUT_BYPDIV_SHIFT 0
+#define GPC2CLK_OUT_BYPDIV31 0x3c
+#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
+ | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
+ | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
+#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
+ GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
+ | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
+ | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
+
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
+#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
+ (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
+
+#include <subdev/clock.h>
+#include <subdev/timer.h>
+
+#ifdef __KERNEL__
+#include <nouveau_platform.h>
+#endif
+
+static const u8 pl_to_div[] = {
+/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
+/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
+};
+
+/* All frequencies in MHz */
+struct gk20a_clk_pllg_params {
+ u32 min_vco, max_vco;
+ u32 min_u, max_u;
+ u32 min_m, max_m;
+ u32 min_n, max_n;
+ u32 min_pl, max_pl;
+};
+
+static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
+ .min_vco = 1000, .max_vco = 1700,
+ .min_u = 12, .max_u = 38,
+ .min_m = 1, .max_m = 255,
+ .min_n = 8, .max_n = 255,
+ .min_pl = 1, .max_pl = 32,
+};
+
+struct gk20a_clock_priv {
+ struct nouveau_clock base;
+ const struct gk20a_clk_pllg_params *params;
+ u32 m, n, pl;
+ u32 parent_rate;
+};
+#define to_gk20a_clock(base) container_of(base, struct gk20a_clock_priv, base)
+
+static void
+gk20a_pllg_read_mnp(struct gk20a_clock_priv *priv)
+{
+ u32 val;
+
+ val = nv_rd32(priv, GPCPLL_COEFF);
+ priv->m = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+ priv->n = (val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH);
+ priv->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+}
+
+static u32
+gk20a_pllg_calc_rate(struct gk20a_clock_priv *priv)
+{
+ u32 rate;
+ u32 divider;
+
+ rate = priv->parent_rate * priv->n;
+ divider = priv->m * pl_to_div[priv->pl];
+ do_div(rate, divider);
+
+ return rate / 2;
+}
+
+static int
+gk20a_pllg_calc_mnp(struct gk20a_clock_priv *priv, unsigned long rate)
+{
+ u32 target_clk_f, ref_clk_f, target_freq;
+ u32 min_vco_f, max_vco_f;
+ u32 low_pl, high_pl, best_pl;
+ u32 target_vco_f, vco_f;
+ u32 best_m, best_n;
+ u32 u_f;
+ u32 m, n, n2;
+ u32 delta, lwv, best_delta = ~0;
+ u32 pl;
+
+ target_clk_f = rate * 2 / MHZ;
+ ref_clk_f = priv->parent_rate / MHZ;
+
+ max_vco_f = priv->params->max_vco;
+ min_vco_f = priv->params->min_vco;
+ best_m = priv->params->max_m;
+ best_n = priv->params->min_n;
+ best_pl = priv->params->min_pl;
+
+ target_vco_f = target_clk_f + target_clk_f / 50;
+ if (max_vco_f < target_vco_f)
+ max_vco_f = target_vco_f;
+
+ /* min_pl <= high_pl <= max_pl */
+ high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
+ high_pl = min(high_pl, priv->params->max_pl);
+ high_pl = max(high_pl, priv->params->min_pl);
+
+ /* min_pl <= low_pl <= max_pl */
+ low_pl = min_vco_f / target_vco_f;
+ low_pl = min(low_pl, priv->params->max_pl);
+ low_pl = max(low_pl, priv->params->min_pl);
+
+ /* Find indices of high_pl and low_pl */
+ for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
+ if (pl_to_div[pl] >= low_pl) {
+ low_pl = pl;
+ break;
+ }
+ }
+ for (pl = 0; pl < ARRAY_SIZE(pl_to_div) - 1; pl++) {
+ if (pl_to_div[pl] >= high_pl) {
+ high_pl = pl;
+ break;
+ }
+ }
+
+ nv_debug(priv, "low_PL %d(div%d), high_PL %d(div%d)", low_pl,
+ pl_to_div[low_pl], high_pl, pl_to_div[high_pl]);
+
+ /* Select lowest possible VCO */
+ for (pl = low_pl; pl <= high_pl; pl++) {
+ target_vco_f = target_clk_f * pl_to_div[pl];
+ for (m = priv->params->min_m; m <= priv->params->max_m; m++) {
+ u_f = ref_clk_f / m;
+
+ if (u_f < priv->params->min_u)
+ break;
+ if (u_f > priv->params->max_u)
+ continue;
+
+ n = (target_vco_f * m) / ref_clk_f;
+ n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;
+
+ if (n > priv->params->max_n)
+ break;
+
+ for (; n <= n2; n++) {
+ if (n < priv->params->min_n)
+ continue;
+ if (n > priv->params->max_n)
+ break;
+
+ vco_f = ref_clk_f * n / m;
+
+ if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
+ lwv = (vco_f + (pl_to_div[pl] / 2))
+ / pl_to_div[pl];
+ delta = abs(lwv - target_clk_f);
+
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_m = m;
+ best_n = n;
+ best_pl = pl;
+
+ if (best_delta == 0)
+ goto found_match;
+ }
+ }
+ }
+ }
+ }
+
+found_match:
+ WARN_ON(best_delta == ~0);
+
+ if (best_delta != 0)
+ nv_debug(priv, "no best match for target @ %dMHz on gpc_pll",
+ target_clk_f);
+
+ priv->m = best_m;
+ priv->n = best_n;
+ priv->pl = best_pl;
+
+ target_freq = gk20a_pllg_calc_rate(priv) / MHZ;
+
+ nv_debug(priv, "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n",
+ target_freq, priv->m, priv->n, priv->pl, pl_to_div[priv->pl]);
+
+ return 0;
+}
+
+static int
+gk20a_pllg_slide(struct gk20a_clock_priv *priv, u32 n)
+{
+ u32 val;
+ int ramp_timeout;
+
+ /* get old coefficients */
+ val = nv_rd32(priv, GPCPLL_COEFF);
+ /* do nothing if NDIV is the same */
+ if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH)))
+ return 0;
+
+ /* setup */
+ nv_mask(priv, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
+ 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
+ nv_mask(priv, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
+ 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
+
+ /* pll slowdown mode */
+ nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
+
+ /* new ndiv ready for ramp */
+ val = nv_rd32(priv, GPCPLL_COEFF);
+ val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
+ val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
+ udelay(1);
+ nv_wr32(priv, GPCPLL_COEFF, val);
+
+ /* dynamic ramp to new ndiv */
+ val = nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+ val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
+ udelay(1);
+ nv_wr32(priv, GPCPLL_NDIV_SLOWDOWN, val);
+
+ for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) {
+ udelay(1);
+ val = nv_rd32(priv, GPC_BCAST_NDIV_SLOWDOWN_DEBUG);
+ if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK)
+ break;
+ }
+
+ /* exit slowdown mode */
+ nv_mask(priv, GPCPLL_NDIV_SLOWDOWN,
+ BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
+ BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
+ nv_rd32(priv, GPCPLL_NDIV_SLOWDOWN);
+
+ if (ramp_timeout <= 0) {
+ nv_error(priv, "gpcpll dynamic ramp timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void
+_gk20a_pllg_enable(struct gk20a_clock_priv *priv)
+{
+ nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
+ nv_rd32(priv, GPCPLL_CFG);
+}
+
+static void
+_gk20a_pllg_disable(struct gk20a_clock_priv *priv)
+{
+ nv_mask(priv, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
+ nv_rd32(priv, GPCPLL_CFG);
+}
+
+static int
+_gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv, bool allow_slide)
+{
+ u32 val, cfg;
+ u32 m_old, pl_old, n_lo;
+
+ /* get old coefficients */
+ val = nv_rd32(priv, GPCPLL_COEFF);
+ m_old = (val >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+ pl_old = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
+
+ /* do NDIV slide if there is no change in M and PL */
+ cfg = nv_rd32(priv, GPCPLL_CFG);
+ if (allow_slide && priv->m == m_old && priv->pl == pl_old &&
+ (cfg & GPCPLL_CFG_ENABLE)) {
+ return gk20a_pllg_slide(priv, priv->n);
+ }
+
+ /* slide down to NDIV_LO */
+ n_lo = DIV_ROUND_UP(m_old * priv->params->min_vco,
+ priv->parent_rate / MHZ);
+ if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
+ int ret = gk20a_pllg_slide(priv, n_lo);
+
+ if (ret)
+ return ret;
+ }
+
+ /* split FO-to-bypass jump in halves by setting out divider 1:2 */
+ nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
+ 0x2 << GPC2CLK_OUT_VCODIV_SHIFT);
+
+ /* put PLL in bypass before programming it */
+ val = nv_rd32(priv, SEL_VCO);
+ val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+ udelay(2);
+ nv_wr32(priv, SEL_VCO, val);
+
+ /* get out from IDDQ */
+ val = nv_rd32(priv, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_IDDQ) {
+ val &= ~GPCPLL_CFG_IDDQ;
+ nv_wr32(priv, GPCPLL_CFG, val);
+ nv_rd32(priv, GPCPLL_CFG);
+ udelay(2);
+ }
+
+ _gk20a_pllg_disable(priv);
+
+ nv_debug(priv, "%s: m=%d n=%d pl=%d\n", __func__, priv->m, priv->n,
+ priv->pl);
+
+ n_lo = DIV_ROUND_UP(priv->m * priv->params->min_vco,
+ priv->parent_rate / MHZ);
+ val = priv->m << GPCPLL_COEFF_M_SHIFT;
+ val |= (allow_slide ? n_lo : priv->n) << GPCPLL_COEFF_N_SHIFT;
+ val |= priv->pl << GPCPLL_COEFF_P_SHIFT;
+ nv_wr32(priv, GPCPLL_COEFF, val);
+
+ _gk20a_pllg_enable(priv);
+
+ val = nv_rd32(priv, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_LOCK_DET_OFF) {
+ val &= ~GPCPLL_CFG_LOCK_DET_OFF;
+ nv_wr32(priv, GPCPLL_CFG, val);
+ }
+
+ if (!nouveau_timer_wait_eq(priv, 300000, GPCPLL_CFG, GPCPLL_CFG_LOCK,
+ GPCPLL_CFG_LOCK)) {
+ nv_error(priv, "%s: timeout waiting for pllg lock\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ /* switch to VCO mode */
+ nv_mask(priv, SEL_VCO, 0, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
+
+ /* restore out divider 1:1 */
+ val = nv_rd32(priv, GPC2CLK_OUT);
+ val &= ~GPC2CLK_OUT_VCODIV_MASK;
+ udelay(2);
+ nv_wr32(priv, GPC2CLK_OUT, val);
+
+ /* slide up to new NDIV */
+ return allow_slide ? gk20a_pllg_slide(priv, priv->n) : 0;
+}
+
+static int
+gk20a_pllg_program_mnp(struct gk20a_clock_priv *priv)
+{
+ int err;
+
+ err = _gk20a_pllg_program_mnp(priv, true);
+ if (err)
+ err = _gk20a_pllg_program_mnp(priv, false);
+
+ return err;
+}
+
+static void
+gk20a_pllg_disable(struct gk20a_clock_priv *priv)
+{
+ u32 val;
+
+ /* slide to VCO min */
+ val = nv_rd32(priv, GPCPLL_CFG);
+ if (val & GPCPLL_CFG_ENABLE) {
+ u32 coeff, m, n_lo;
+
+ coeff = nv_rd32(priv, GPCPLL_COEFF);
+ m = (coeff >> GPCPLL_COEFF_M_SHIFT) & MASK(GPCPLL_COEFF_M_WIDTH);
+ n_lo = DIV_ROUND_UP(m * priv->params->min_vco,
+ priv->parent_rate / MHZ);
+ gk20a_pllg_slide(priv, n_lo);
+ }
+
+ /* put PLL in bypass before disabling it */
+ nv_mask(priv, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
+
+ _gk20a_pllg_disable(priv);
+}
+
+#define GK20A_CLK_GPC_MDIV 1000
+
+static struct nouveau_clocks
+gk20a_domains[] = {
+ { nv_clk_src_crystal, 0xff },
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max }
+};
+
+static struct nouveau_pstate
+gk20a_pstates[] = {
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 72000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 108000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 180000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 252000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 324000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 396000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 468000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 540000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 612000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 648000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 684000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 708000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 756000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 804000,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 852000,
+ },
+ },
+};
+
+static int
+gk20a_clock_read(struct nouveau_clock *clk, enum nv_clk_src src)
+{
+ struct gk20a_clock_priv *priv = (void *)clk;
+
+ switch (src) {
+ case nv_clk_src_crystal:
+ return nv_device(clk)->crystal;
+ case nv_clk_src_gpc:
+ gk20a_pllg_read_mnp(priv);
+ return gk20a_pllg_calc_rate(priv) / GK20A_CLK_GPC_MDIV;
+ default:
+ nv_error(clk, "invalid clock source %d\n", src);
+ return -EINVAL;
+ }
+}
+
+static int
+gk20a_clock_calc(struct nouveau_clock *clk, struct nouveau_cstate *cstate)
+{
+ struct gk20a_clock_priv *priv = (void *)clk;
+
+ return gk20a_pllg_calc_mnp(priv, cstate->domain[nv_clk_src_gpc] *
+ GK20A_CLK_GPC_MDIV);
+}
+
+static int
+gk20a_clock_prog(struct nouveau_clock *clk)
+{
+ struct gk20a_clock_priv *priv = (void *)clk;
+
+ return gk20a_pllg_program_mnp(priv);
+}
+
+static void
+gk20a_clock_tidy(struct nouveau_clock *clk)
+{
+}
+
+static int
+gk20a_clock_fini(struct nouveau_object *object, bool suspend)
+{
+ struct gk20a_clock_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_clock_fini(&priv->base, false);
+
+ gk20a_pllg_disable(priv);
+
+ return ret;
+}
+
+static int
+gk20a_clock_init(struct nouveau_object *object)
+{
+ struct gk20a_clock_priv *priv = (void *)object;
+ int ret;
+
+ nv_mask(priv, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL);
+
+ ret = nouveau_clock_init(&priv->base);
+ if (ret)
+ return ret;
+
+ ret = gk20a_clock_prog(&priv->base);
+ if (ret) {
+ nv_error(priv, "cannot initialize clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+gk20a_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct gk20a_clock_priv *priv;
+ struct nouveau_platform_device *plat;
+ int ret;
+ int i;
+
+ /* Finish initializing the pstates */
+ for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
+ INIT_LIST_HEAD(&gk20a_pstates[i].list);
+ gk20a_pstates[i].pstate = i + 1;
+ }
+
+ ret = nouveau_clock_create(parent, engine, oclass, gk20a_domains,
+ gk20a_pstates, ARRAY_SIZE(gk20a_pstates), true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->params = &gk20a_pllg_params;
+
+ plat = nv_device_to_platform(nv_device(parent));
+ priv->parent_rate = clk_get_rate(plat->gpu->clk);
+ nv_info(priv, "parent clock rate: %d MHz\n", priv->parent_rate / MHZ);
+
+ priv->base.read = gk20a_clock_read;
+ priv->base.calc = gk20a_clock_calc;
+ priv->base.prog = gk20a_clock_prog;
+ priv->base.tidy = gk20a_clock_tidy;
+
+ return 0;
+}
+
+struct nouveau_oclass
+gk20a_clock_oclass = {
+ .handle = NV_SUBDEV(CLOCK, 0xea),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gk20a_clock_ctor,
+ .dtor = _nouveau_subdev_dtor,
+ .init = gk20a_clock_init,
+ .fini = gk20a_clock_fini,
+ },
+};
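/*
 * Illustrative sketch (not part of this patch): the GPCPLL output computed
 * by gk20a_pllg_calc_rate() above is
 *
 *	gpc2clk = ref * N / (M * pl_to_div[PL]),	core = gpc2clk / 2
 *
 * e.g. with a 12 MHz reference, M = 1, N = 84 and PL index 1 (divider 2),
 * the VCO runs at 1008 MHz (inside the 1000-1700 MHz window above) and the
 * core clock comes out at 252 MHz, one of the pstates listed earlier.  The
 * example_* name and the chosen numbers are hypothetical.
 */
static unsigned long
example_gpcpll_rate(unsigned long ref, u32 m, u32 n, u32 pl)
{
	return ref * n / (m * pl_to_div[pl]) / 2;
}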
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
index eb2d4425a49e..4c48232686be 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv04.c
@@ -82,8 +82,8 @@ nv04_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, false,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv04_domain, NULL, 0,
+ false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
index 8a9e16839791..08368fe97029 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv40.c
@@ -213,8 +213,8 @@ nv40_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv40_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, true,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nv40_domain, NULL, 0,
+ true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
index 8c132772ba9e..5070ebc260f8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nv50.c
@@ -507,7 +507,7 @@ nv50_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_clock_create(parent, engine, oclass, pclass->domains,
- false, &priv);
+ NULL, 0, false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index 9fb58354a80b..087012b18956 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -302,8 +302,8 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nva3_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, false,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0,
+ false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
index 6a65fc9e9663..74e19731b1b7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvaa.c
@@ -421,8 +421,8 @@ nvaa_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvaa_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, true,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nvaa_domains, NULL,
+ 0, true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index dbf8517f54da..1234abaab2db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -437,8 +437,8 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, false,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nvc0_domain, NULL, 0,
+ false, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
index 0e62a3240144..7eccad57512e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
@@ -475,8 +475,8 @@ nve0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nve0_clock_priv *priv;
int ret;
- ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, true,
- &priv);
+ ret = nouveau_clock_create(parent, engine, oclass, nve0_domain, NULL, 0,
+ true, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 1fc55c1e91a1..4150b0d10af8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -250,9 +250,11 @@ nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c08_page) {
- priv->r100c08 = nv_device_map_page(device, priv->r100c08_page);
- if (!priv->r100c08)
- nv_warn(priv, "failed 0x100c08 page map\n");
+ priv->r100c08 = dma_map_page(nv_device_base(device),
+ priv->r100c08_page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(nv_device_base(device), priv->r100c08))
+ return -EFAULT;
} else {
nv_warn(priv, "failed 0x100c08 page alloc\n");
}
@@ -268,7 +270,8 @@ nv50_fb_dtor(struct nouveau_object *object)
struct nv50_fb_priv *priv = (void *)object;
if (priv->r100c08_page) {
- nv_device_unmap_page(device, priv->r100c08);
+ dma_unmap_page(nv_device_base(device), priv->r100c08, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(priv->r100c08_page);
}
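
For reference, a minimal standalone sketch (not part of the patch) of the streaming-DMA pattern the nv50/nvc0 fb constructors switch to above: allocate a zeroed page, map it with dma_map_page(), and validate the handle with dma_mapping_error() before trusting it. The generic `dev` pointer stands in for whatever nv_device_base() returns in the tree; the helper names are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Map one zeroed scratch page for device DMA; returns 0 or a -errno. */
static int example_map_scratch(struct device *dev, struct page **ppage,
                               dma_addr_t *paddr)
{
        struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

        if (!page)
                return -ENOMEM;

        *paddr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *paddr)) {
                __free_page(page);
                return -EFAULT;
        }

        *ppage = page;
        return 0;
}

/* Tear-down mirrors the dtor hunks above: unmap, then free the page. */
static void example_unmap_scratch(struct device *dev, struct page *page,
                                  dma_addr_t addr)
{
        dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
        __free_page(page);
}
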
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 0670ae33ee45..b19a2b3c1081 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -70,7 +70,8 @@ nvc0_fb_dtor(struct nouveau_object *object)
struct nvc0_fb_priv *priv = (void *)object;
if (priv->r100c10_page) {
- nv_device_unmap_page(device, priv->r100c10);
+ dma_unmap_page(nv_device_base(device), priv->r100c10, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
__free_page(priv->r100c10_page);
}
@@ -93,8 +94,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (priv->r100c10_page) {
- priv->r100c10 = nv_device_map_page(device, priv->r100c10_page);
- if (!priv->r100c10)
+ priv->r100c10 = dma_map_page(nv_device_base(device),
+ priv->r100c10_page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(nv_device_base(device), priv->r100c10))
return -EFAULT;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 5a6a5027f749..946518572346 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -26,7 +26,7 @@
#include <subdev/bios/pll.h>
#include <subdev/bios/rammap.h>
#include <subdev/bios/timing.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/clock.h>
#include <subdev/clock/pll.h>
@@ -425,7 +425,7 @@ extern const u8 nvc0_pte_storage_type_map[256];
void
nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
- struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
+ struct nouveau_ltc *ltc = nouveau_ltc(pfb);
struct nouveau_mem *mem = *pmem;
*pmem = NULL;
@@ -434,7 +434,7 @@ nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
mutex_lock(&pfb->base.mutex);
if (mem->tag)
- ltcg->tags_free(ltcg, &mem->tag);
+ ltc->tags_free(ltc, &mem->tag);
__nv50_ram_put(pfb, mem);
mutex_unlock(&pfb->base.mutex);
@@ -468,12 +468,12 @@ nvc0_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
mutex_lock(&pfb->base.mutex);
if (comp) {
- struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
+ struct nouveau_ltc *ltc = nouveau_ltc(pfb);
/* compression only works with lpages */
if (align == (1 << (17 - 12))) {
int n = size >> 5;
- ltcg->tags_alloc(ltcg, n, &mem->tag);
+ ltc->tags_alloc(ltc, n, &mem->tag);
}
if (unlikely(!mem->tag))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index 45e0202f3151..b1e3ed7c8beb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -106,39 +106,59 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
}
static void
-nouveau_gpio_intr_disable(struct nouveau_event *event, int type, int index)
+nouveau_gpio_intr_fini(struct nvkm_event *event, int type, int index)
{
- struct nouveau_gpio *gpio = nouveau_gpio(event->priv);
+ struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event);
const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
impl->intr_mask(gpio, type, 1 << index, 0);
}
static void
-nouveau_gpio_intr_enable(struct nouveau_event *event, int type, int index)
+nouveau_gpio_intr_init(struct nvkm_event *event, int type, int index)
{
- struct nouveau_gpio *gpio = nouveau_gpio(event->priv);
+ struct nouveau_gpio *gpio = container_of(event, typeof(*gpio), event);
const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
impl->intr_mask(gpio, type, 1 << index, 1 << index);
}
+static int
+nouveau_gpio_intr_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ struct nvkm_gpio_ntfy_req *req = data;
+ if (!WARN_ON(size != sizeof(*req))) {
+ notify->size = sizeof(struct nvkm_gpio_ntfy_rep);
+ notify->types = req->mask;
+ notify->index = req->line;
+ return 0;
+ }
+ return -EINVAL;
+}
+
static void
nouveau_gpio_intr(struct nouveau_subdev *subdev)
{
struct nouveau_gpio *gpio = nouveau_gpio(subdev);
const struct nouveau_gpio_impl *impl = (void *)nv_object(gpio)->oclass;
- u32 hi, lo, e, i;
+ u32 hi, lo, i;
impl->intr_stat(gpio, &hi, &lo);
- for (i = 0; e = 0, (hi | lo) && i < impl->lines; i++) {
- if (hi & (1 << i))
- e |= NVKM_GPIO_HI;
- if (lo & (1 << i))
- e |= NVKM_GPIO_LO;
- nouveau_event_trigger(gpio->events, e, i);
+ for (i = 0; (hi | lo) && i < impl->lines; i++) {
+ struct nvkm_gpio_ntfy_rep rep = {
+ .mask = (NVKM_GPIO_HI * !!(hi & (1 << i))) |
+ (NVKM_GPIO_LO * !!(lo & (1 << i))),
+ };
+ nvkm_event_send(&gpio->event, rep.mask, i, &rep, sizeof(rep));
}
}
+static const struct nvkm_event_func
+nouveau_gpio_intr_func = {
+ .ctor = nouveau_gpio_intr_ctor,
+ .init = nouveau_gpio_intr_init,
+ .fini = nouveau_gpio_intr_fini,
+};
+
int
_nouveau_gpio_fini(struct nouveau_object *object, bool suspend)
{
@@ -183,7 +203,7 @@ void
_nouveau_gpio_dtor(struct nouveau_object *object)
{
struct nouveau_gpio *gpio = (void *)object;
- nouveau_event_destroy(&gpio->events);
+ nvkm_event_fini(&gpio->event);
nouveau_subdev_destroy(&gpio->base);
}
@@ -208,13 +228,11 @@ nouveau_gpio_create_(struct nouveau_object *parent,
gpio->get = nouveau_gpio_get;
gpio->reset = impl->reset;
- ret = nouveau_event_create(2, impl->lines, &gpio->events);
+ ret = nvkm_event_init(&nouveau_gpio_intr_func, 2, impl->lines,
+ &gpio->event);
if (ret)
return ret;
- gpio->events->priv = gpio;
- gpio->events->enable = nouveau_gpio_intr_enable;
- gpio->events->disable = nouveau_gpio_intr_disable;
nv_subdev(gpio)->intr = nouveau_gpio_intr;
return 0;
}
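
The gpio (and, below, i2c) conversions drop the event->priv back-pointer and recover the containing subdev with container_of() instead, which works because the nvkm_event is embedded directly in the gpio/i2c structure. A hedged, generic sketch of that idiom follows; the type and function names are illustrative, not from the tree.

#include <linux/kernel.h>       /* container_of() */

struct example_event {
        int refs;
};

struct example_gpio {
        int lines;
        struct example_event event;    /* embedded, like gpio->event above */
};

/* Given a pointer to the embedded member, recover the outer object. */
static struct example_gpio *
example_gpio_from_event(struct example_event *event)
{
        return container_of(event, struct example_gpio, event);
}

This removes the need to stash a priv pointer in the event object at creation time.
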
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 09ba2cc851cf..a652cafde3d6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -326,9 +326,9 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
}
static void
-nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index)
+nouveau_i2c_intr_fini(struct nvkm_event *event, int type, int index)
{
- struct nouveau_i2c *i2c = nouveau_i2c(event->priv);
+ struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event);
struct nouveau_i2c_port *port = i2c->find(i2c, index);
const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
if (port && port->aux >= 0)
@@ -336,15 +336,28 @@ nouveau_i2c_intr_disable(struct nouveau_event *event, int type, int index)
}
static void
-nouveau_i2c_intr_enable(struct nouveau_event *event, int type, int index)
+nouveau_i2c_intr_init(struct nvkm_event *event, int type, int index)
{
- struct nouveau_i2c *i2c = nouveau_i2c(event->priv);
+ struct nouveau_i2c *i2c = container_of(event, typeof(*i2c), event);
struct nouveau_i2c_port *port = i2c->find(i2c, index);
const struct nouveau_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
if (port && port->aux >= 0)
impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
}
+static int
+nouveau_i2c_intr_ctor(void *data, u32 size, struct nvkm_notify *notify)
+{
+ struct nvkm_i2c_ntfy_req *req = data;
+ if (!WARN_ON(size != sizeof(*req))) {
+ notify->size = sizeof(struct nvkm_i2c_ntfy_rep);
+ notify->types = req->mask;
+ notify->index = req->port;
+ return 0;
+ }
+ return -EINVAL;
+}
+
static void
nouveau_i2c_intr(struct nouveau_subdev *subdev)
{
@@ -364,13 +377,26 @@ nouveau_i2c_intr(struct nouveau_subdev *subdev)
if (lo & (1 << port->aux)) e |= NVKM_I2C_UNPLUG;
if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ;
if (tx & (1 << port->aux)) e |= NVKM_I2C_DONE;
-
- nouveau_event_trigger(i2c->ntfy, e, port->index);
+ if (e) {
+ struct nvkm_i2c_ntfy_rep rep = {
+ .mask = e,
+ };
+ nvkm_event_send(&i2c->event, rep.mask,
+ port->index, &rep,
+ sizeof(rep));
+ }
}
}
}
}
+static const struct nvkm_event_func
+nouveau_i2c_intr_func = {
+ .ctor = nouveau_i2c_intr_ctor,
+ .init = nouveau_i2c_intr_init,
+ .fini = nouveau_i2c_intr_fini,
+};
+
int
_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
{
@@ -431,7 +457,7 @@ _nouveau_i2c_dtor(struct nouveau_object *object)
struct nouveau_i2c *i2c = (void *)object;
struct nouveau_i2c_port *port, *temp;
- nouveau_event_destroy(&i2c->ntfy);
+ nvkm_event_fini(&i2c->event);
list_for_each_entry_safe(port, temp, &i2c->ports, head) {
nouveau_object_ref(NULL, (struct nouveau_object **)&port);
@@ -547,13 +573,10 @@ nouveau_i2c_create_(struct nouveau_object *parent,
}
}
- ret = nouveau_event_create(4, index, &i2c->ntfy);
+ ret = nvkm_event_init(&nouveau_i2c_intr_func, 4, index, &i2c->event);
if (ret)
return ret;
- i2c->ntfy->priv = i2c;
- i2c->ntfy->enable = nouveau_i2c_intr_enable;
- i2c->ntfy->disable = nouveau_i2c_intr_disable;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
new file mode 100644
index 000000000000..32ed442c5913
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/base.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "priv.h"
+
+static int
+nvkm_ltc_tags_alloc(struct nouveau_ltc *ltc, u32 n,
+ struct nouveau_mm_node **pnode)
+{
+ struct nvkm_ltc_priv *priv = (void *)ltc;
+ int ret;
+
+ ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
+ if (ret)
+ *pnode = NULL;
+
+ return ret;
+}
+
+static void
+nvkm_ltc_tags_free(struct nouveau_ltc *ltc, struct nouveau_mm_node **pnode)
+{
+ struct nvkm_ltc_priv *priv = (void *)ltc;
+ nouveau_mm_free(&priv->tags, pnode);
+}
+
+static void
+nvkm_ltc_tags_clear(struct nouveau_ltc *ltc, u32 first, u32 count)
+{
+ const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
+ struct nvkm_ltc_priv *priv = (void *)ltc;
+ const u32 limit = first + count - 1;
+
+ BUG_ON((first > limit) || (limit >= priv->num_tags));
+
+ impl->cbc_clear(priv, first, limit);
+ impl->cbc_wait(priv);
+}
+
+static int
+nvkm_ltc_zbc_color_get(struct nouveau_ltc *ltc, int index, const u32 color[4])
+{
+ const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
+ struct nvkm_ltc_priv *priv = (void *)ltc;
+ memcpy(priv->zbc_color[index], color, sizeof(priv->zbc_color[index]));
+ impl->zbc_clear_color(priv, index, color);
+ return index;
+}
+
+static int
+nvkm_ltc_zbc_depth_get(struct nouveau_ltc *ltc, int index, const u32 depth)
+{
+ const struct nvkm_ltc_impl *impl = (void *)nv_oclass(ltc);
+ struct nvkm_ltc_priv *priv = (void *)ltc;
+ priv->zbc_depth[index] = depth;
+ impl->zbc_clear_depth(priv, index, depth);
+ return index;
+}
+
+int
+_nvkm_ltc_init(struct nouveau_object *object)
+{
+ const struct nvkm_ltc_impl *impl = (void *)nv_oclass(object);
+ struct nvkm_ltc_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_subdev_init(&priv->base.base);
+ if (ret)
+ return ret;
+
+ for (i = priv->base.zbc_min; i <= priv->base.zbc_max; i++) {
+ impl->zbc_clear_color(priv, i, priv->zbc_color[i]);
+ impl->zbc_clear_depth(priv, i, priv->zbc_depth[i]);
+ }
+
+ return 0;
+}
+
+int
+nvkm_ltc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, int length, void **pobject)
+{
+ const struct nvkm_ltc_impl *impl = (void *)oclass;
+ struct nvkm_ltc_priv *priv;
+ int ret;
+
+ ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PLTCG",
+ "l2c", length, pobject);
+ priv = *pobject;
+ if (ret)
+ return ret;
+
+ memset(priv->zbc_color, 0x00, sizeof(priv->zbc_color));
+ memset(priv->zbc_depth, 0x00, sizeof(priv->zbc_depth));
+
+ priv->base.base.intr = impl->intr;
+ priv->base.tags_alloc = nvkm_ltc_tags_alloc;
+ priv->base.tags_free = nvkm_ltc_tags_free;
+ priv->base.tags_clear = nvkm_ltc_tags_clear;
+ priv->base.zbc_min = 1; /* reserve 0 for disabled */
+ priv->base.zbc_max = min(impl->zbc, NOUVEAU_LTC_MAX_ZBC_CNT) - 1;
+ priv->base.zbc_color_get = nvkm_ltc_zbc_color_get;
+ priv->base.zbc_depth_get = nvkm_ltc_zbc_depth_get;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
index f2f3338a967a..9e00a1ede120 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
@@ -25,10 +25,45 @@
#include <subdev/fb.h>
#include <subdev/timer.h>
-#include "gf100.h"
+#include "priv.h"
+
+void
+gf100_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+{
+ nv_wr32(priv, 0x17e8cc, start);
+ nv_wr32(priv, 0x17e8d0, limit);
+ nv_wr32(priv, 0x17e8c8, 0x00000004);
+}
+
+void
+gf100_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+{
+ int c, s;
+ for (c = 0; c < priv->ltc_nr; c++) {
+ for (s = 0; s < priv->lts_nr; s++)
+ nv_wait(priv, 0x1410c8 + c * 0x2000 + s * 0x400, ~0, 0);
+ }
+}
+
+void
+gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+{
+ nv_mask(priv, 0x17ea44, 0x0000000f, i);
+ nv_wr32(priv, 0x17ea48, color[0]);
+ nv_wr32(priv, 0x17ea4c, color[1]);
+ nv_wr32(priv, 0x17ea50, color[2]);
+ nv_wr32(priv, 0x17ea54, color[3]);
+}
+
+void
+gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+{
+ nv_mask(priv, 0x17ea44, 0x0000000f, i);
+ nv_wr32(priv, 0x17ea58, depth);
+}
static void
-gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
+gf100_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
{
u32 base = 0x141000 + (ltc * 0x2000) + (lts * 0x400);
u32 stat = nv_rd32(priv, base + 0x020);
@@ -39,17 +74,17 @@ gf100_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
}
}
-static void
-gf100_ltcg_intr(struct nouveau_subdev *subdev)
+void
+gf100_ltc_intr(struct nouveau_subdev *subdev)
{
- struct gf100_ltcg_priv *priv = (void *)subdev;
+ struct nvkm_ltc_priv *priv = (void *)subdev;
u32 mask;
mask = nv_rd32(priv, 0x00017c);
while (mask) {
u32 lts, ltc = __ffs(mask);
for (lts = 0; lts < priv->lts_nr; lts++)
- gf100_ltcg_lts_isr(priv, ltc, lts);
+ gf100_ltc_lts_isr(priv, ltc, lts);
mask &= ~(1 << ltc);
}
@@ -59,52 +94,38 @@ gf100_ltcg_intr(struct nouveau_subdev *subdev)
nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
}
-int
-gf100_ltcg_tags_alloc(struct nouveau_ltcg *ltcg, u32 n,
- struct nouveau_mm_node **pnode)
+static int
+gf100_ltc_init(struct nouveau_object *object)
{
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
+ struct nvkm_ltc_priv *priv = (void *)object;
int ret;
- ret = nouveau_mm_head(&priv->tags, 1, n, n, 1, pnode);
+ ret = nvkm_ltc_init(priv);
if (ret)
- *pnode = NULL;
+ return ret;
- return ret;
+ nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+ nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
+ nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ return 0;
}
void
-gf100_ltcg_tags_free(struct nouveau_ltcg *ltcg, struct nouveau_mm_node **pnode)
-{
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
-
- nouveau_mm_free(&priv->tags, pnode);
-}
-
-static void
-gf100_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+gf100_ltc_dtor(struct nouveau_object *object)
{
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- u32 last = first + count - 1;
- int p, i;
-
- BUG_ON((first > last) || (last >= priv->num_tags));
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvkm_ltc_priv *priv = (void *)object;
- nv_wr32(priv, 0x17e8cc, first);
- nv_wr32(priv, 0x17e8d0, last);
- nv_wr32(priv, 0x17e8c8, 0x4); /* trigger clear */
+ nouveau_mm_fini(&priv->tags);
+ nouveau_mm_free(&pfb->vram, &priv->tag_ram);
- /* wait until it's finished with clearing */
- for (p = 0; p < priv->ltc_nr; ++p) {
- for (i = 0; i < priv->lts_nr; ++i)
- nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
- }
+ nvkm_ltc_destroy(priv);
}
/* TODO: Figure out tag memory details and drop the over-cautious allocation.
*/
int
-gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv)
+gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv)
{
u32 tag_size, tag_margin, tag_align;
int ret;
@@ -142,22 +163,22 @@ gf100_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct gf100_ltcg_priv *priv)
priv->tag_base = tag_base;
}
- ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
+ ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
return ret;
}
-static int
-gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+int
+gf100_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct gf100_ltcg_priv *priv;
struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_ltc_priv *priv;
u32 parts, mask;
int ret, i;
- ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
+ ret = nvkm_ltc_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -170,57 +191,27 @@ gf100_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
priv->lts_nr = nv_rd32(priv, 0x17e8dc) >> 28;
- ret = gf100_ltcg_init_tag_ram(pfb, priv);
+ ret = gf100_ltc_init_tag_ram(pfb, priv);
if (ret)
return ret;
- priv->base.tags_alloc = gf100_ltcg_tags_alloc;
- priv->base.tags_free = gf100_ltcg_tags_free;
- priv->base.tags_clear = gf100_ltcg_tags_clear;
-
- nv_subdev(priv)->intr = gf100_ltcg_intr;
- return 0;
-}
-
-void
-gf100_ltcg_dtor(struct nouveau_object *object)
-{
- struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- struct nouveau_fb *pfb = nouveau_fb(ltcg->base.base.parent);
-
- nouveau_mm_fini(&priv->tags);
- nouveau_mm_free(&pfb->vram, &priv->tag_ram);
-
- nouveau_ltcg_destroy(ltcg);
-}
-
-static int
-gf100_ltcg_init(struct nouveau_object *object)
-{
- struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- int ret;
-
- ret = nouveau_ltcg_init(ltcg);
- if (ret)
- return ret;
-
- nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
- nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
- if (nv_device(ltcg)->card_type >= NV_E0)
- nv_wr32(priv, 0x17e000, priv->ltc_nr);
- nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ nv_subdev(priv)->intr = gf100_ltc_intr;
return 0;
}
struct nouveau_oclass *
-gf100_ltcg_oclass = &(struct nouveau_oclass) {
- .handle = NV_SUBDEV(LTCG, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = gf100_ltcg_ctor,
- .dtor = gf100_ltcg_dtor,
- .init = gf100_ltcg_init,
- .fini = _nouveau_ltcg_fini,
+gf100_ltc_oclass = &(struct nvkm_ltc_impl) {
+ .base.handle = NV_SUBDEV(LTC, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gf100_ltc_ctor,
+ .dtor = gf100_ltc_dtor,
+ .init = gf100_ltc_init,
+ .fini = _nvkm_ltc_fini,
},
-};
+ .intr = gf100_ltc_intr,
+ .cbc_clear = gf100_ltc_cbc_clear,
+ .cbc_wait = gf100_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gf100_ltc_zbc_clear_color,
+ .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
new file mode 100644
index 000000000000..ea716569745d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gk104.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+static int
+gk104_ltc_init(struct nouveau_object *object)
+{
+ struct nvkm_ltc_priv *priv = (void *)object;
+ int ret;
+
+ ret = nvkm_ltc_init(priv);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
+ nv_wr32(priv, 0x17e000, priv->ltc_nr);
+ nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ return 0;
+}
+
+struct nouveau_oclass *
+gk104_ltc_oclass = &(struct nvkm_ltc_impl) {
+ .base.handle = NV_SUBDEV(LTC, 0xe4),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gf100_ltc_ctor,
+ .dtor = gf100_ltc_dtor,
+ .init = gk104_ltc_init,
+ .fini = _nvkm_ltc_fini,
+ },
+ .intr = gf100_ltc_intr,
+ .cbc_clear = gf100_ltc_cbc_clear,
+ .cbc_wait = gf100_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gf100_ltc_zbc_clear_color,
+ .zbc_clear_depth = gf100_ltc_zbc_clear_depth,
+}.base;
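
The gf100/gk104 oclass definitions above build a C99 compound literal of the implementation struct and export a pointer to its embedded base member; callers only ever see the base class, while the implementation recovers its extra fields by casting the oclass pointer back. A small hedged sketch of the idiom with made-up types:

struct base_class {
        unsigned long handle;
};

struct impl_class {
        struct base_class base;        /* kept first so a cast recovers it */
        int zbc;
};

/* File-scope compound literals have static storage, so taking the
 * address of their .base member is a valid constant initializer. */
struct base_class *example_oclass = &(struct impl_class) {
        .base.handle = 0xff,
        .zbc = 16,
}.base;
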
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
index e79d0e81de40..4761b2e9af00 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gm107.c
@@ -25,10 +25,45 @@
#include <subdev/fb.h>
#include <subdev/timer.h>
-#include "gf100.h"
+#include "priv.h"
static void
-gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
+gm107_ltc_cbc_clear(struct nvkm_ltc_priv *priv, u32 start, u32 limit)
+{
+ nv_wr32(priv, 0x17e270, start);
+ nv_wr32(priv, 0x17e274, limit);
+ nv_wr32(priv, 0x17e26c, 0x00000004);
+}
+
+static void
+gm107_ltc_cbc_wait(struct nvkm_ltc_priv *priv)
+{
+ int c, s;
+ for (c = 0; c < priv->ltc_nr; c++) {
+ for (s = 0; s < priv->lts_nr; s++)
+ nv_wait(priv, 0x14046c + c * 0x2000 + s * 0x200, ~0, 0);
+ }
+}
+
+static void
+gm107_ltc_zbc_clear_color(struct nvkm_ltc_priv *priv, int i, const u32 color[4])
+{
+ nv_mask(priv, 0x17e338, 0x0000000f, i);
+ nv_wr32(priv, 0x17e33c, color[0]);
+ nv_wr32(priv, 0x17e340, color[1]);
+ nv_wr32(priv, 0x17e344, color[2]);
+ nv_wr32(priv, 0x17e348, color[3]);
+}
+
+static void
+gm107_ltc_zbc_clear_depth(struct nvkm_ltc_priv *priv, int i, const u32 depth)
+{
+ nv_mask(priv, 0x17e338, 0x0000000f, i);
+ nv_wr32(priv, 0x17e34c, depth);
+}
+
+static void
+gm107_ltc_lts_isr(struct nvkm_ltc_priv *priv, int ltc, int lts)
{
u32 base = 0x140000 + (ltc * 0x2000) + (lts * 0x400);
u32 stat = nv_rd32(priv, base + 0x00c);
@@ -40,16 +75,16 @@ gm107_ltcg_lts_isr(struct gf100_ltcg_priv *priv, int ltc, int lts)
}
static void
-gm107_ltcg_intr(struct nouveau_subdev *subdev)
+gm107_ltc_intr(struct nouveau_subdev *subdev)
{
- struct gf100_ltcg_priv *priv = (void *)subdev;
+ struct nvkm_ltc_priv *priv = (void *)subdev;
u32 mask;
mask = nv_rd32(priv, 0x00017c);
while (mask) {
u32 lts, ltc = __ffs(mask);
for (lts = 0; lts < priv->lts_nr; lts++)
- gm107_ltcg_lts_isr(priv, ltc, lts);
+ gm107_ltc_lts_isr(priv, ltc, lts);
mask &= ~(1 << ltc);
}
@@ -59,37 +94,32 @@ gm107_ltcg_intr(struct nouveau_subdev *subdev)
nv_mask(priv, 0x000640, 0x02000000, 0x00000000);
}
-static void
-gm107_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
+static int
+gm107_ltc_init(struct nouveau_object *object)
{
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- u32 last = first + count - 1;
- int p, i;
-
- BUG_ON((first > last) || (last >= priv->num_tags));
+ struct nvkm_ltc_priv *priv = (void *)object;
+ int ret;
- nv_wr32(priv, 0x17e270, first);
- nv_wr32(priv, 0x17e274, last);
- nv_wr32(priv, 0x17e26c, 0x4); /* trigger clear */
+ ret = nvkm_ltc_init(priv);
+ if (ret)
+ return ret;
- /* wait until it's finished with clearing */
- for (p = 0; p < priv->ltc_nr; ++p) {
- for (i = 0; i < priv->lts_nr; ++i)
- nv_wait(priv, 0x14046c + p * 0x2000 + i * 0x200, ~0, 0);
- }
+ nv_wr32(priv, 0x17e27c, priv->ltc_nr);
+ nv_wr32(priv, 0x17e278, priv->tag_base);
+ return 0;
}
static int
-gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+gm107_ltc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
{
- struct gf100_ltcg_priv *priv;
struct nouveau_fb *pfb = nouveau_fb(parent);
+ struct nvkm_ltc_priv *priv;
u32 parts, mask;
int ret, i;
- ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
+ ret = nvkm_ltc_create(parent, engine, oclass, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
@@ -102,41 +132,26 @@ gm107_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
priv->lts_nr = nv_rd32(priv, 0x17e280) >> 28;
- ret = gf100_ltcg_init_tag_ram(pfb, priv);
- if (ret)
- return ret;
-
- priv->base.tags_alloc = gf100_ltcg_tags_alloc;
- priv->base.tags_free = gf100_ltcg_tags_free;
- priv->base.tags_clear = gm107_ltcg_tags_clear;
-
- nv_subdev(priv)->intr = gm107_ltcg_intr;
- return 0;
-}
-
-static int
-gm107_ltcg_init(struct nouveau_object *object)
-{
- struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
- struct gf100_ltcg_priv *priv = (struct gf100_ltcg_priv *)ltcg;
- int ret;
-
- ret = nouveau_ltcg_init(ltcg);
+ ret = gf100_ltc_init_tag_ram(pfb, priv);
if (ret)
return ret;
- nv_wr32(priv, 0x17e27c, priv->ltc_nr);
- nv_wr32(priv, 0x17e278, priv->tag_base);
return 0;
}
struct nouveau_oclass *
-gm107_ltcg_oclass = &(struct nouveau_oclass) {
- .handle = NV_SUBDEV(LTCG, 0xff),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = gm107_ltcg_ctor,
- .dtor = gf100_ltcg_dtor,
- .init = gm107_ltcg_init,
- .fini = _nouveau_ltcg_fini,
+gm107_ltc_oclass = &(struct nvkm_ltc_impl) {
+ .base.handle = NV_SUBDEV(LTC, 0xff),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = gm107_ltc_ctor,
+ .dtor = gf100_ltc_dtor,
+ .init = gm107_ltc_init,
+ .fini = _nvkm_ltc_fini,
},
-};
+ .intr = gm107_ltc_intr,
+ .cbc_clear = gm107_ltc_cbc_clear,
+ .cbc_wait = gm107_ltc_cbc_wait,
+ .zbc = 16,
+ .zbc_clear_color = gm107_ltc_zbc_clear_color,
+ .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
+}.base;
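
Both LTC interrupt handlers walk the set bits of the top-level status word with __ffs(), clearing each bit as its unit is serviced. A generic sketch of that loop shape (the handler callback is illustrative):

#include <linux/bitops.h>       /* __ffs() */
#include <linux/types.h>

static void example_for_each_set_unit(u32 mask,
                                      void (*handler)(unsigned int))
{
        while (mask) {
                /* __ffs() is undefined for 0, so the while() guards it */
                unsigned int unit = __ffs(mask);

                handler(unit);
                mask &= ~(1u << unit);
        }
}
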
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
new file mode 100644
index 000000000000..594924f39126
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/priv.h
@@ -0,0 +1,69 @@
+#ifndef __NVKM_LTC_PRIV_H__
+#define __NVKM_LTC_PRIV_H__
+
+#include <subdev/ltc.h>
+#include <subdev/fb.h>
+
+struct nvkm_ltc_priv {
+ struct nouveau_ltc base;
+ u32 ltc_nr;
+ u32 lts_nr;
+
+ u32 num_tags;
+ u32 tag_base;
+ struct nouveau_mm tags;
+ struct nouveau_mm_node *tag_ram;
+
+ u32 zbc_color[NOUVEAU_LTC_MAX_ZBC_CNT][4];
+ u32 zbc_depth[NOUVEAU_LTC_MAX_ZBC_CNT];
+};
+
+#define nvkm_ltc_create(p,e,o,d) \
+ nvkm_ltc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nvkm_ltc_destroy(p) ({ \
+ struct nvkm_ltc_priv *_priv = (p); \
+ _nvkm_ltc_dtor(nv_object(_priv)); \
+})
+#define nvkm_ltc_init(p) ({ \
+ struct nvkm_ltc_priv *_priv = (p); \
+ _nvkm_ltc_init(nv_object(_priv)); \
+})
+#define nvkm_ltc_fini(p,s) ({ \
+ struct nvkm_ltc_priv *_priv = (p); \
+ _nvkm_ltc_fini(nv_object(_priv), (s)); \
+})
+
+int nvkm_ltc_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+
+#define _nvkm_ltc_dtor _nouveau_subdev_dtor
+int _nvkm_ltc_init(struct nouveau_object *);
+#define _nvkm_ltc_fini _nouveau_subdev_fini
+
+int gf100_ltc_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+void gf100_ltc_dtor(struct nouveau_object *);
+int gf100_ltc_init_tag_ram(struct nouveau_fb *, struct nvkm_ltc_priv *);
+int gf100_ltc_tags_alloc(struct nouveau_ltc *, u32, struct nouveau_mm_node **);
+void gf100_ltc_tags_free(struct nouveau_ltc *, struct nouveau_mm_node **);
+
+struct nvkm_ltc_impl {
+ struct nouveau_oclass base;
+ void (*intr)(struct nouveau_subdev *);
+
+ void (*cbc_clear)(struct nvkm_ltc_priv *, u32 start, u32 limit);
+ void (*cbc_wait)(struct nvkm_ltc_priv *);
+
+ int zbc;
+ void (*zbc_clear_color)(struct nvkm_ltc_priv *, int, const u32[4]);
+ void (*zbc_clear_depth)(struct nvkm_ltc_priv *, int, const u32);
+};
+
+void gf100_ltc_intr(struct nouveau_subdev *);
+void gf100_ltc_cbc_clear(struct nvkm_ltc_priv *, u32, u32);
+void gf100_ltc_cbc_wait(struct nvkm_ltc_priv *);
+void gf100_ltc_zbc_clear_color(struct nvkm_ltc_priv *, int, const u32[4]);
+void gf100_ltc_zbc_clear_depth(struct nvkm_ltc_priv *, int, const u32);
+
+#endif
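
The nvkm_ltc_destroy()/init()/fini() wrappers in this header rely on GCC statement expressions (`({ ... })`) so the macro can declare a typed local, evaluate its argument exactly once, and still yield the wrapped call's return value. A hedged sketch of the construct outside the nouveau object model, with invented names:

struct example_base { int id; };
struct example_priv { struct example_base base; };

static int example_do_init(struct example_base *base)
{
        return base->id ? 0 : -1;
}

/* Evaluate `obj` exactly once, then forward to the typed helper; the
 * whole ({ ... }) expression evaluates to the helper's return value
 * (a GCC/clang statement-expression extension, as in the macros above). */
#define example_init(obj) ({                            \
        struct example_priv *_priv = (obj);             \
        example_do_init(&_priv->base);                  \
})
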
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h b/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
deleted file mode 100644
index 87b10b8412ea..000000000000
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/gf100.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifndef __NVKM_LTCG_PRIV_GF100_H__
-#define __NVKM_LTCG_PRIV_GF100_H__
-
-#include <subdev/ltcg.h>
-
-struct gf100_ltcg_priv {
- struct nouveau_ltcg base;
- u32 ltc_nr;
- u32 lts_nr;
- u32 num_tags;
- u32 tag_base;
- struct nouveau_mm tags;
- struct nouveau_mm_node *tag_ram;
-};
-
-void gf100_ltcg_dtor(struct nouveau_object *);
-int gf100_ltcg_init_tag_ram(struct nouveau_fb *, struct gf100_ltcg_priv *);
-int gf100_ltcg_tags_alloc(struct nouveau_ltcg *, u32, struct nouveau_mm_node **);
-void gf100_ltcg_tags_free(struct nouveau_ltcg *, struct nouveau_mm_node **);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 8a5555192fa5..ca7cee3a314a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -22,9 +22,17 @@
* Authors: Ben Skeggs
*/
-#include <subdev/mc.h>
+#include "priv.h"
#include <core/option.h>
+static inline void
+nouveau_mc_unk260(struct nouveau_mc *pmc, u32 data)
+{
+ const struct nouveau_mc_oclass *impl = (void *)nv_oclass(pmc);
+ if (impl->unk260)
+ impl->unk260(pmc, data);
+}
+
static inline u32
nouveau_mc_intr_mask(struct nouveau_mc *pmc)
{
@@ -114,6 +122,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ pmc->unk260 = nouveau_mc_unk260;
+
if (nv_device_is_pci(device))
switch (device->pdev->device & 0x0ff0) {
case 0x00f0:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c
new file mode 100644
index 000000000000..b8d6cb435d0a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/gk20a.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv04.h"
+
+struct nouveau_oclass *
+gk20a_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0xea),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
+ .dtor = _nouveau_mc_dtor,
+ .init = nv50_mc_init,
+ .fini = _nouveau_mc_fini,
+ },
+ .intr = nvc0_mc_intr,
+ .msi_rearm = nv40_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
index 81a408e7d034..4d9ea46c47c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -1,7 +1,7 @@
#ifndef __NVKM_MC_NV04_H__
#define __NVKM_MC_NV04_H__
-#include <subdev/mc.h>
+#include "priv.h"
struct nv04_mc_priv {
struct nouveau_mc base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index f9c6a678b47d..15d41dc176ff 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -41,7 +41,7 @@ nvc0_mc_intr[] = {
{ 0x00200000, NVDEV_SUBDEV_GPIO }, /* PMGR->GPIO */
{ 0x00200000, NVDEV_SUBDEV_I2C }, /* PMGR->I2C/AUX */
{ 0x01000000, NVDEV_SUBDEV_PWR },
- { 0x02000000, NVDEV_SUBDEV_LTCG },
+ { 0x02000000, NVDEV_SUBDEV_LTC },
{ 0x08000000, NVDEV_SUBDEV_FB },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x40000000, NVDEV_SUBDEV_IBUS },
@@ -56,6 +56,12 @@ nvc0_mc_msi_rearm(struct nouveau_mc *pmc)
nv_wr32(priv, 0x088704, 0x00000000);
}
+void
+nvc0_mc_unk260(struct nouveau_mc *pmc, u32 data)
+{
+ nv_wr32(pmc, 0x000260, data);
+}
+
struct nouveau_oclass *
nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0xc0),
@@ -67,4 +73,5 @@ nvc0_mc_oclass = &(struct nouveau_mc_oclass) {
},
.intr = nvc0_mc_intr,
.msi_rearm = nvc0_mc_msi_rearm,
+ .unk260 = nvc0_mc_unk260,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
index 837e545aeb9f..68b5f61aadb5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc3.c
@@ -35,4 +35,5 @@ nvc3_mc_oclass = &(struct nouveau_mc_oclass) {
},
.intr = nvc0_mc_intr,
.msi_rearm = nv40_mc_msi_rearm,
+ .unk260 = nvc0_mc_unk260,
}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h
new file mode 100644
index 000000000000..911e66392587
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/priv.h
@@ -0,0 +1,38 @@
+#ifndef __NVKM_MC_PRIV_H__
+#define __NVKM_MC_PRIV_H__
+
+#include <subdev/mc.h>
+
+#define nouveau_mc_create(p,e,o,d) \
+ nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_destroy(p) ({ \
+ struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
+})
+#define nouveau_mc_init(p) ({ \
+ struct nouveau_mc *pmc = (p); _nouveau_mc_init(nv_object(pmc)); \
+})
+#define nouveau_mc_fini(p,s) ({ \
+ struct nouveau_mc *pmc = (p); _nouveau_mc_fini(nv_object(pmc), (s)); \
+})
+
+int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+void _nouveau_mc_dtor(struct nouveau_object *);
+int _nouveau_mc_init(struct nouveau_object *);
+int _nouveau_mc_fini(struct nouveau_object *, bool);
+
+struct nouveau_mc_intr {
+ u32 stat;
+ u32 unit;
+};
+
+struct nouveau_mc_oclass {
+ struct nouveau_oclass base;
+ const struct nouveau_mc_intr *intr;
+ void (*msi_rearm)(struct nouveau_mc *);
+ void (*unk260)(struct nouveau_mc *, u32);
+};
+
+void nvc0_mc_unk260(struct nouveau_mc *, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
index d4fd3bc9c66f..69f1f34f6931 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/base.c
@@ -22,9 +22,18 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
#include <subdev/timer.h>
+#include "priv.h"
+
+static void
+nouveau_pwr_pgob(struct nouveau_pwr *ppwr, bool enable)
+{
+ const struct nvkm_pwr_impl *impl = (void *)nv_oclass(ppwr);
+ if (impl->pgob)
+ impl->pgob(ppwr, enable);
+}
+
static int
nouveau_pwr_send(struct nouveau_pwr *ppwr, u32 reply[2],
u32 process, u32 message, u32 data0, u32 data1)
@@ -177,6 +186,7 @@ _nouveau_pwr_fini(struct nouveau_object *object, bool suspend)
int
_nouveau_pwr_init(struct nouveau_object *object)
{
+ const struct nvkm_pwr_impl *impl = (void *)object->oclass;
struct nouveau_pwr *ppwr = (void *)object;
int ret, i;
@@ -186,6 +196,7 @@ _nouveau_pwr_init(struct nouveau_object *object)
nv_subdev(ppwr)->intr = nouveau_pwr_intr;
ppwr->message = nouveau_pwr_send;
+ ppwr->pgob = nouveau_pwr_pgob;
/* prevent previous ucode from running, wait for idle, reset */
nv_wr32(ppwr, 0x10a014, 0x0000ffff); /* INTR_EN_CLR = ALL */
@@ -195,15 +206,15 @@ _nouveau_pwr_init(struct nouveau_object *object)
/* upload data segment */
nv_wr32(ppwr, 0x10a1c0, 0x01000000);
- for (i = 0; i < ppwr->data.size / 4; i++)
- nv_wr32(ppwr, 0x10a1c4, ppwr->data.data[i]);
+ for (i = 0; i < impl->data.size / 4; i++)
+ nv_wr32(ppwr, 0x10a1c4, impl->data.data[i]);
/* upload code segment */
nv_wr32(ppwr, 0x10a180, 0x01000000);
- for (i = 0; i < ppwr->code.size / 4; i++) {
+ for (i = 0; i < impl->code.size / 4; i++) {
if ((i & 0x3f) == 0)
nv_wr32(ppwr, 0x10a188, i >> 6);
- nv_wr32(ppwr, 0x10a184, ppwr->code.data[i]);
+ nv_wr32(ppwr, 0x10a184, impl->code.data[i]);
}
/* start it running */
@@ -245,3 +256,15 @@ nouveau_pwr_create_(struct nouveau_object *parent,
init_waitqueue_head(&ppwr->recv.wait);
return 0;
}
+
+int
+_nouveau_pwr_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_pwr *ppwr;
+ int ret = nouveau_pwr_create(parent, engine, oclass, &ppwr);
+ *pobject = nv_object(ppwr);
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
index e2a63ac5422b..5668e045bac1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/macros.fuc
@@ -242,7 +242,7 @@
*/ push reg /*
*/ pop $r13 /*
*/ pop $r14 /*
-*/ call(wr32) /*
+*/ call(wr32)
#else
#define nv_wr32(addr,reg) /*
*/ sethi $r0 0x14000000 /*
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 39a5dc150a05..986495d533dd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000046f,
- 0x00000461,
+ 0x00000464,
+ 0x00000456,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000473,
- 0x00000471,
+ 0x00000468,
+ 0x00000466,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000877,
- 0x0000071e,
+ 0x0000086c,
+ 0x00000713,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000898,
- 0x00000879,
+ 0x0000088d,
+ 0x0000086e,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x000008a3,
- 0x000008a1,
+ 0x00000898,
+ 0x00000896,
0x00000000,
0x00000000,
0x00000000,
@@ -239,10 +239,10 @@ uint32_t nv108_pwr_data[] = {
0x000003df,
0x00040003,
0x00000000,
- 0x00000407,
+ 0x000003fc,
0x00010004,
0x00000000,
- 0x00000421,
+ 0x00000416,
/* 0x03ac: memx_func_tail */
/* 0x03ac: memx_data_head */
0x00000000,
@@ -1080,375 +1080,375 @@ uint32_t nv108_pwr_code[] = {
0x50f960f9,
0xe0fcd0fc,
0x00002e7e,
- 0x140003f1,
- 0xa00506fd,
- 0xb604bd05,
- 0x1bf40242,
-/* 0x0407: memx_func_wait */
- 0x0800f8dd,
- 0x0088cf2c,
- 0x98001e98,
- 0x1c98011d,
- 0x031b9802,
- 0x7e1010b6,
- 0xf8000071,
-/* 0x0421: memx_func_delay */
+ 0xf40242b6,
+ 0x00f8e81b,
+/* 0x03fc: memx_func_wait */
+ 0x88cf2c08,
0x001e9800,
- 0x7e0410b6,
- 0xf800005d,
-/* 0x042d: memx_exec */
- 0xf9e0f900,
- 0xb2c1b2d0,
-/* 0x0435: memx_exec_next */
- 0x001398b2,
- 0x950410b6,
- 0x30f01034,
- 0xde35980c,
- 0x12a655f9,
- 0xfced1ef4,
- 0x7ee0fcd0,
- 0xf800023f,
-/* 0x0455: memx_info */
- 0x03ac4c00,
- 0x7e08004b,
- 0xf800023f,
-/* 0x0461: memx_recv */
- 0x01d6b000,
- 0xb0c90bf4,
- 0x0bf400d6,
-/* 0x046f: memx_init */
- 0xf800f8eb,
-/* 0x0471: perf_recv */
-/* 0x0473: perf_init */
- 0xf800f800,
-/* 0x0475: i2c_drive_scl */
- 0x0036b000,
- 0x400d0bf4,
- 0x01f607e0,
- 0xf804bd00,
-/* 0x0485: i2c_drive_scl_lo */
- 0x07e44000,
- 0xbd0001f6,
-/* 0x048f: i2c_drive_sda */
- 0xb000f804,
- 0x0bf40036,
- 0x07e0400d,
- 0xbd0002f6,
-/* 0x049f: i2c_drive_sda_lo */
- 0x4000f804,
- 0x02f607e4,
- 0xf804bd00,
-/* 0x04a9: i2c_sense_scl */
- 0x0132f400,
- 0xcf07c443,
- 0x31fd0033,
- 0x060bf404,
-/* 0x04bb: i2c_sense_scl_done */
- 0xf80131f4,
-/* 0x04bd: i2c_sense_sda */
- 0x0132f400,
- 0xcf07c443,
- 0x32fd0033,
- 0x060bf404,
-/* 0x04cf: i2c_sense_sda_done */
- 0xf80131f4,
-/* 0x04d1: i2c_raise_scl */
- 0x4440f900,
- 0x01030898,
- 0x0004757e,
-/* 0x04dc: i2c_raise_scl_wait */
- 0x7e03e84e,
- 0x7e00005d,
- 0xf40004a9,
- 0x42b60901,
- 0xef1bf401,
-/* 0x04f0: i2c_raise_scl_done */
- 0x00f840fc,
-/* 0x04f4: i2c_start */
- 0x0004a97e,
- 0x7e0d11f4,
- 0xf40004bd,
- 0x0ef40611,
-/* 0x0505: i2c_start_rep */
- 0x7e00032e,
- 0x03000475,
- 0x048f7e01,
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xd17e50fc,
- 0x64b60004,
- 0x1d11f404,
-/* 0x0530: i2c_start_send */
- 0x8f7e0003,
- 0x884e0004,
- 0x005d7e13,
- 0x7e000300,
- 0x4e000475,
- 0x5d7e1388,
-/* 0x054a: i2c_start_out */
- 0x00f80000,
-/* 0x054c: i2c_stop */
- 0x757e0003,
- 0x00030004,
- 0x00048f7e,
- 0x7e03e84e,
- 0x0300005d,
- 0x04757e01,
- 0x13884e00,
+ 0x98011d98,
+ 0x1b98021c,
+ 0x1010b603,
+ 0x0000717e,
+/* 0x0416: memx_func_delay */
+ 0x1e9800f8,
+ 0x0410b600,
0x00005d7e,
- 0x8f7e0103,
- 0x884e0004,
- 0x005d7e13,
-/* 0x057b: i2c_bitw */
- 0x7e00f800,
- 0x4e00048f,
- 0x5d7e03e8,
- 0x76bb0000,
+/* 0x0422: memx_exec */
+ 0xe0f900f8,
+ 0xc1b2d0f9,
+/* 0x042a: memx_exec_next */
+ 0x1398b2b2,
+ 0x0410b600,
+ 0xf0103495,
+ 0x35980c30,
+ 0xa655f9de,
+ 0xed1ef412,
+ 0xe0fcd0fc,
+ 0x00023f7e,
+/* 0x044a: memx_info */
+ 0xac4c00f8,
+ 0x08004b03,
+ 0x00023f7e,
+/* 0x0456: memx_recv */
+ 0xd6b000f8,
+ 0xc90bf401,
+ 0xf400d6b0,
+ 0x00f8eb0b,
+/* 0x0464: memx_init */
+/* 0x0466: perf_recv */
+ 0x00f800f8,
+/* 0x0468: perf_init */
+/* 0x046a: i2c_drive_scl */
+ 0x36b000f8,
+ 0x0d0bf400,
+ 0xf607e040,
+ 0x04bd0001,
+/* 0x047a: i2c_drive_scl_lo */
+ 0xe44000f8,
+ 0x0001f607,
+ 0x00f804bd,
+/* 0x0484: i2c_drive_sda */
+ 0xf40036b0,
+ 0xe0400d0b,
+ 0x0002f607,
+ 0x00f804bd,
+/* 0x0494: i2c_drive_sda_lo */
+ 0xf607e440,
+ 0x04bd0002,
+/* 0x049e: i2c_sense_scl */
+ 0x32f400f8,
+ 0x07c44301,
+ 0xfd0033cf,
+ 0x0bf40431,
+ 0x0131f406,
+/* 0x04b0: i2c_sense_scl_done */
+/* 0x04b2: i2c_sense_sda */
+ 0x32f400f8,
+ 0x07c44301,
+ 0xfd0033cf,
+ 0x0bf40432,
+ 0x0131f406,
+/* 0x04c4: i2c_sense_sda_done */
+/* 0x04c6: i2c_raise_scl */
+ 0x40f900f8,
+ 0x03089844,
+ 0x046a7e01,
+/* 0x04d1: i2c_raise_scl_wait */
+ 0x03e84e00,
+ 0x00005d7e,
+ 0x00049e7e,
+ 0xb60901f4,
+ 0x1bf40142,
+/* 0x04e5: i2c_raise_scl_done */
+ 0xf840fcef,
+/* 0x04e9: i2c_start */
+ 0x049e7e00,
+ 0x0d11f400,
+ 0x0004b27e,
+ 0xf40611f4,
+/* 0x04fa: i2c_start_rep */
+ 0x00032e0e,
+ 0x00046a7e,
+ 0x847e0103,
+ 0x76bb0004,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb60004d1,
+ 0xb60004c6,
0x11f40464,
- 0x13884e17,
+/* 0x0525: i2c_start_send */
+ 0x7e00031d,
+ 0x4e000484,
+ 0x5d7e1388,
+ 0x00030000,
+ 0x00046a7e,
+ 0x7e13884e,
+/* 0x053f: i2c_start_out */
+ 0xf800005d,
+/* 0x0541: i2c_stop */
+ 0x7e000300,
+ 0x0300046a,
+ 0x04847e00,
+ 0x03e84e00,
0x00005d7e,
- 0x757e0003,
+ 0x6a7e0103,
0x884e0004,
0x005d7e13,
-/* 0x05b9: i2c_bitw_out */
-/* 0x05bb: i2c_bitr */
- 0x0300f800,
- 0x048f7e01,
- 0x03e84e00,
- 0x00005d7e,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x04d17e50,
- 0x0464b600,
- 0x7e1a11f4,
- 0x030004bd,
- 0x04757e00,
- 0x13884e00,
- 0x00005d7e,
- 0xf4013cf0,
-/* 0x05fe: i2c_bitr_done */
- 0x00f80131,
-/* 0x0600: i2c_get_byte */
- 0x08040005,
-/* 0x0604: i2c_get_byte_next */
- 0xbb0154b6,
+ 0x7e010300,
+ 0x4e000484,
+ 0x5d7e1388,
+ 0x00f80000,
+/* 0x0570: i2c_bitw */
+ 0x0004847e,
+ 0x7e03e84e,
+ 0xbb00005d,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0005bb7e,
+ 0x0004c67e,
0xf40464b6,
- 0x53fd2a11,
- 0x0142b605,
- 0x03d81bf4,
- 0x0076bb01,
+ 0x884e1711,
+ 0x005d7e13,
+ 0x7e000300,
+ 0x4e00046a,
+ 0x5d7e1388,
+/* 0x05ae: i2c_bitw_out */
+ 0x00f80000,
+/* 0x05b0: i2c_bitr */
+ 0x847e0103,
+ 0xe84e0004,
+ 0x005d7e03,
+ 0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0x7b7e50fc,
- 0x64b60005,
-/* 0x064d: i2c_get_byte_done */
-/* 0x064f: i2c_put_byte */
- 0x0400f804,
-/* 0x0651: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x00057b7e,
- 0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
+ 0xc67e50fc,
+ 0x64b60004,
+ 0x1a11f404,
+ 0x0004b27e,
+ 0x6a7e0003,
+ 0x884e0004,
+ 0x005d7e13,
+ 0x013cf000,
+/* 0x05f3: i2c_bitr_done */
+ 0xf80131f4,
+/* 0x05f5: i2c_get_byte */
+ 0x04000500,
+/* 0x05f9: i2c_get_byte_next */
+ 0x0154b608,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x05bb7e50,
+ 0x05b07e50,
0x0464b600,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x06a7: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x06a9: i2c_addr */
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xf47e50fc,
- 0x64b60004,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
+ 0xfd2a11f4,
+ 0x42b60553,
+ 0xd81bf401,
+ 0x76bb0103,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb600064f,
-/* 0x06ee: i2c_addr_done */
+ 0xb6000570,
+/* 0x0642: i2c_get_byte_done */
0x00f80464,
-/* 0x06f0: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b705e4,
- 0x00f8d014,
-/* 0x06fc: i2c_acquire */
- 0x0006f07e,
- 0x0000047e,
- 0x7e03d9f0,
- 0xf800002e,
-/* 0x070d: i2c_release */
- 0x06f07e00,
- 0x00047e00,
- 0x03daf000,
- 0x00002e7e,
-/* 0x071e: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13b80137,
- 0x98000bd4,
- 0x13b80032,
- 0x98000bac,
- 0x31f40031,
- 0xf9d0f902,
- 0xf1d0f9e0,
- 0xf1000067,
- 0x92100063,
- 0x76bb0167,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0x7e50fc04,
- 0xb60006fc,
- 0xd0fc0464,
- 0xf500d6b0,
- 0x0500b01b,
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xa97e50fc,
- 0x64b60006,
- 0xcc11f504,
- 0xe0c5c700,
+/* 0x0644: i2c_put_byte */
+/* 0x0646: i2c_put_byte_next */
+ 0x42b60804,
+ 0x3854ff01,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x064f7e50,
+ 0x05707e50,
0x0464b600,
- 0x00a911f5,
- 0x76bb0105,
+ 0xb03411f4,
+ 0x1bf40046,
+ 0x0076bbd8,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0xb07e50fc,
+ 0x64b60005,
+ 0x0f11f404,
+ 0xb00076bb,
+ 0x1bf40136,
+ 0x0132f406,
+/* 0x069c: i2c_put_byte_done */
+/* 0x069e: i2c_addr */
+ 0x76bb00f8,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb60006a9,
- 0x11f50464,
- 0x76bb0087,
+ 0xb60004e9,
+ 0x11f40464,
+ 0x2ec3e729,
+ 0x0134b601,
+ 0xbb0553fd,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0006447e,
+/* 0x06e3: i2c_addr_done */
+ 0xf80464b6,
+/* 0x06e5: i2c_acquire_addr */
+ 0xf8cec700,
+ 0xb705e4b6,
+ 0xf8d014e0,
+/* 0x06f1: i2c_acquire */
+ 0x06e57e00,
+ 0x00047e00,
+ 0x03d9f000,
+ 0x00002e7e,
+/* 0x0702: i2c_release */
+ 0xe57e00f8,
+ 0x047e0006,
+ 0xdaf00000,
+ 0x002e7e03,
+/* 0x0713: i2c_recv */
+ 0xf400f800,
+ 0xc1c70132,
+ 0x0214b6f8,
+ 0xf52816b0,
+ 0xb801371f,
+ 0x000bd413,
+ 0xb8003298,
+ 0x000bac13,
+ 0xf4003198,
+ 0xd0f90231,
+ 0xd0f9e0f9,
+ 0x000067f1,
+ 0x100063f1,
+ 0xbb016792,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0006f17e,
+ 0xfc0464b6,
+ 0x00d6b0d0,
+ 0x00b01bf5,
+ 0x76bb0005,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000600,
- 0x11f40464,
- 0xe05bcb67,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x054c7e50,
- 0x0464b600,
- 0x74bd5bb2,
-/* 0x0823: i2c_recv_not_rd08 */
- 0xb0410ef4,
- 0x1bf401d6,
- 0x7e00053b,
- 0xf40006a9,
- 0xc5c73211,
- 0x064f7ee0,
- 0x2811f400,
- 0xa97e0005,
+ 0xb600069e,
+ 0x11f50464,
+ 0xc5c700cc,
+ 0x0076bbe0,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x447e50fc,
+ 0x64b60006,
+ 0xa911f504,
+ 0xbb010500,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x00069e7e,
+ 0xf50464b6,
+ 0xbb008711,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0005f57e,
+ 0xf40464b6,
+ 0x5bcb6711,
+ 0x0076bbe0,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x417e50fc,
+ 0x64b60005,
+ 0xbd5bb204,
+ 0x410ef474,
+/* 0x0818: i2c_recv_not_rd08 */
+ 0xf401d6b0,
+ 0x00053b1b,
+ 0x00069e7e,
+ 0xc73211f4,
+ 0x447ee0c5,
0x11f40006,
- 0xe0b5c71f,
- 0x00064f7e,
- 0x7e1511f4,
- 0xbd00054c,
- 0x08c5c774,
- 0xf4091bf4,
- 0x0ef40232,
-/* 0x0861: i2c_recv_not_wr08 */
-/* 0x0861: i2c_recv_done */
- 0xf8cec703,
- 0x00070d7e,
- 0xd0fce0fc,
- 0xb20912f4,
- 0x023f7e7c,
-/* 0x0875: i2c_recv_exit */
-/* 0x0877: i2c_init */
- 0xf800f800,
-/* 0x0879: test_recv */
- 0x04584100,
- 0xb60011cf,
- 0x58400110,
- 0x0001f604,
- 0xe7f104bd,
- 0xe3f1d900,
- 0x967e134f,
- 0x00f80001,
-/* 0x0898: test_init */
- 0x7e08004e,
- 0xf8000196,
-/* 0x08a1: idle_recv */
-/* 0x08a3: idle */
- 0xf400f800,
- 0x54410031,
+ 0x7e000528,
+ 0xf400069e,
+ 0xb5c71f11,
+ 0x06447ee0,
+ 0x1511f400,
+ 0x0005417e,
+ 0xc5c774bd,
+ 0x091bf408,
+ 0xf40232f4,
+/* 0x0856: i2c_recv_not_wr08 */
+/* 0x0856: i2c_recv_done */
+ 0xcec7030e,
+ 0x07027ef8,
+ 0xfce0fc00,
+ 0x0912f4d0,
+ 0x3f7e7cb2,
+/* 0x086a: i2c_recv_exit */
+ 0x00f80002,
+/* 0x086c: i2c_init */
+/* 0x086e: test_recv */
+ 0x584100f8,
0x0011cf04,
0x400110b6,
- 0x01f60454,
-/* 0x08b7: idle_loop */
- 0x0104bd00,
- 0x0232f458,
-/* 0x08bc: idle_proc */
-/* 0x08bc: idle_proc_exec */
- 0x1eb210f9,
- 0x0002487e,
- 0x11f410fc,
- 0x0231f409,
-/* 0x08cf: idle_proc_next */
- 0xb6f00ef4,
- 0x1fa65810,
- 0xf4e81bf4,
- 0x28f4e002,
- 0xc60ef400,
+ 0x01f60458,
+ 0xf104bd00,
+ 0xf1d900e7,
+ 0x7e134fe3,
+ 0xf8000196,
+/* 0x088d: test_init */
+ 0x08004e00,
+ 0x0001967e,
+/* 0x0896: idle_recv */
+ 0x00f800f8,
+/* 0x0898: idle */
+ 0x410031f4,
+ 0x11cf0454,
+ 0x0110b600,
+ 0xf6045440,
+ 0x04bd0001,
+/* 0x08ac: idle_loop */
+ 0x32f45801,
+/* 0x08b1: idle_proc */
+/* 0x08b1: idle_proc_exec */
+ 0xb210f902,
+ 0x02487e1e,
+ 0xf410fc00,
+ 0x31f40911,
+ 0xf00ef402,
+/* 0x08c4: idle_proc_next */
+ 0xa65810b6,
+ 0xe81bf41f,
+ 0xf4e002f4,
+ 0x0ef40028,
+ 0x000000c6,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 254205cd5166..e087ce3041be 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000054e,
- 0x00000540,
+ 0x00000542,
+ 0x00000534,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000552,
- 0x00000550,
+ 0x00000546,
+ 0x00000544,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000982,
- 0x00000825,
+ 0x00000976,
+ 0x00000819,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x000009ab,
- 0x00000984,
+ 0x0000099f,
+ 0x00000978,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x000009b7,
- 0x000009b5,
+ 0x000009ab,
+ 0x000009a9,
0x00000000,
0x00000000,
0x00000000,
@@ -239,10 +239,10 @@ uint32_t nva3_pwr_data[] = {
0x000004b7,
0x00040003,
0x00000000,
- 0x000004df,
+ 0x000004d3,
0x00010004,
0x00000000,
- 0x000004fc,
+ 0x000004f0,
/* 0x03ac: memx_func_tail */
/* 0x03ac: memx_data_head */
0x00000000,
@@ -1198,13 +1198,10 @@ uint32_t nva3_pwr_code[] = {
0x0810b601,
0x50f960f9,
0xe0fcd0fc,
- 0xf13f21f4,
- 0xfd140003,
- 0x05800506,
- 0xb604bd00,
+ 0xb63f21f4,
0x1bf40242,
-/* 0x04df: memx_func_wait */
- 0xf000f8dd,
+/* 0x04d3: memx_func_wait */
+ 0xf000f8e9,
0x84b62c87,
0x0088cf06,
0x98001e98,
@@ -1212,14 +1209,14 @@ uint32_t nva3_pwr_code[] = {
0x031b9802,
0xf41010b6,
0x00f89c21,
-/* 0x04fc: memx_func_delay */
+/* 0x04f0: memx_func_delay */
0xb6001e98,
0x21f40410,
-/* 0x0507: memx_exec */
+/* 0x04fb: memx_exec */
0xf900f87f,
0xb9d0f9e0,
0xb2b902c1,
-/* 0x0511: memx_exec_next */
+/* 0x0505: memx_exec_next */
0x00139802,
0x950410b6,
0x30f01034,
@@ -1228,112 +1225,112 @@ uint32_t nva3_pwr_code[] = {
0xec1ef406,
0xe0fcd0fc,
0x02b921f5,
-/* 0x0532: memx_info */
+/* 0x0526: memx_info */
0xc7f100f8,
0xb7f103ac,
0x21f50800,
0x00f802b9,
-/* 0x0540: memx_recv */
+/* 0x0534: memx_recv */
0xf401d6b0,
0xd6b0c40b,
0xe90bf400,
-/* 0x054e: memx_init */
+/* 0x0542: memx_init */
0x00f800f8,
-/* 0x0550: perf_recv */
-/* 0x0552: perf_init */
+/* 0x0544: perf_recv */
+/* 0x0546: perf_init */
0x00f800f8,
-/* 0x0554: i2c_drive_scl */
+/* 0x0548: i2c_drive_scl */
0xf40036b0,
0x07f1110b,
0x04b607e0,
0x0001d006,
0x00f804bd,
-/* 0x0568: i2c_drive_scl_lo */
+/* 0x055c: i2c_drive_scl_lo */
0x07e407f1,
0xd00604b6,
0x04bd0001,
-/* 0x0576: i2c_drive_sda */
+/* 0x056a: i2c_drive_sda */
0x36b000f8,
0x110bf400,
0x07e007f1,
0xd00604b6,
0x04bd0002,
-/* 0x058a: i2c_drive_sda_lo */
+/* 0x057e: i2c_drive_sda_lo */
0x07f100f8,
0x04b607e4,
0x0002d006,
0x00f804bd,
-/* 0x0598: i2c_sense_scl */
+/* 0x058c: i2c_sense_scl */
0xf10132f4,
0xb607c437,
0x33cf0634,
0x0431fd00,
0xf4060bf4,
-/* 0x05ae: i2c_sense_scl_done */
+/* 0x05a2: i2c_sense_scl_done */
0x00f80131,
-/* 0x05b0: i2c_sense_sda */
+/* 0x05a4: i2c_sense_sda */
0xf10132f4,
0xb607c437,
0x33cf0634,
0x0432fd00,
0xf4060bf4,
-/* 0x05c6: i2c_sense_sda_done */
+/* 0x05ba: i2c_sense_sda_done */
0x00f80131,
-/* 0x05c8: i2c_raise_scl */
+/* 0x05bc: i2c_raise_scl */
0x47f140f9,
0x37f00898,
- 0x5421f501,
-/* 0x05d5: i2c_raise_scl_wait */
+ 0x4821f501,
+/* 0x05c9: i2c_raise_scl_wait */
0xe8e7f105,
0x7f21f403,
- 0x059821f5,
+ 0x058c21f5,
0xb60901f4,
0x1bf40142,
-/* 0x05e9: i2c_raise_scl_done */
+/* 0x05dd: i2c_raise_scl_done */
0xf840fcef,
-/* 0x05ed: i2c_start */
- 0x9821f500,
+/* 0x05e1: i2c_start */
+ 0x8c21f500,
0x0d11f405,
- 0x05b021f5,
+ 0x05a421f5,
0xf40611f4,
-/* 0x05fe: i2c_start_rep */
+/* 0x05f2: i2c_start_rep */
0x37f0300e,
- 0x5421f500,
+ 0x4821f500,
0x0137f005,
- 0x057621f5,
+ 0x056a21f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xc821f550,
+ 0xbc21f550,
0x0464b605,
-/* 0x062b: i2c_start_send */
+/* 0x061f: i2c_start_send */
0xf01f11f4,
0x21f50037,
- 0xe7f10576,
+ 0xe7f1056a,
0x21f41388,
0x0037f07f,
- 0x055421f5,
+ 0x054821f5,
0x1388e7f1,
-/* 0x0647: i2c_start_out */
+/* 0x063b: i2c_start_out */
0xf87f21f4,
-/* 0x0649: i2c_stop */
+/* 0x063d: i2c_stop */
0x0037f000,
- 0x055421f5,
+ 0x054821f5,
0xf50037f0,
- 0xf1057621,
+ 0xf1056a21,
0xf403e8e7,
0x37f07f21,
- 0x5421f501,
+ 0x4821f501,
0x88e7f105,
0x7f21f413,
0xf50137f0,
- 0xf1057621,
+ 0xf1056a21,
0xf41388e7,
0x00f87f21,
-/* 0x067c: i2c_bitw */
- 0x057621f5,
+/* 0x0670: i2c_bitw */
+ 0x056a21f5,
0x03e8e7f1,
0xbb7f21f4,
0x65b60076,
@@ -1341,18 +1338,18 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05c821f5,
+ 0x05bc21f5,
0xf40464b6,
0xe7f11811,
0x21f41388,
0x0037f07f,
- 0x055421f5,
+ 0x054821f5,
0x1388e7f1,
-/* 0x06bb: i2c_bitw_out */
+/* 0x06af: i2c_bitw_out */
0xf87f21f4,
-/* 0x06bd: i2c_bitr */
+/* 0x06b1: i2c_bitr */
0x0137f000,
- 0x057621f5,
+ 0x056a21f5,
0x03e8e7f1,
0xbb7f21f4,
0x65b60076,
@@ -1360,19 +1357,19 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05c821f5,
+ 0x05bc21f5,
0xf40464b6,
0x21f51b11,
- 0x37f005b0,
- 0x5421f500,
+ 0x37f005a4,
+ 0x4821f500,
0x88e7f105,
0x7f21f413,
0xf4013cf0,
-/* 0x0702: i2c_bitr_done */
+/* 0x06f6: i2c_bitr_done */
0x00f80131,
-/* 0x0704: i2c_get_byte */
+/* 0x06f8: i2c_get_byte */
0xf00057f0,
-/* 0x070a: i2c_get_byte_next */
+/* 0x06fe: i2c_get_byte_next */
0x54b60847,
0x0076bb01,
0xf90465b6,
@@ -1380,7 +1377,7 @@ uint32_t nva3_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b606bd,
+ 0x64b606b1,
0x2b11f404,
0xb60553fd,
0x1bf40142,
@@ -1390,12 +1387,12 @@ uint32_t nva3_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x7c21f550,
+ 0x7021f550,
0x0464b606,
-/* 0x0754: i2c_get_byte_done */
-/* 0x0756: i2c_put_byte */
+/* 0x0748: i2c_get_byte_done */
+/* 0x074a: i2c_put_byte */
0x47f000f8,
-/* 0x0759: i2c_put_byte_next */
+/* 0x074d: i2c_put_byte_next */
0x0142b608,
0xbb3854ff,
0x65b60076,
@@ -1403,7 +1400,7 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x067c21f5,
+ 0x067021f5,
0xf40464b6,
0x46b03411,
0xd81bf400,
@@ -1412,21 +1409,21 @@ uint32_t nva3_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xbd21f550,
+ 0xb121f550,
0x0464b606,
0xbb0f11f4,
0x36b00076,
0x061bf401,
-/* 0x07af: i2c_put_byte_done */
+/* 0x07a3: i2c_put_byte_done */
0xf80132f4,
-/* 0x07b1: i2c_addr */
+/* 0x07a5: i2c_addr */
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b605ed,
+ 0x64b605e1,
0x2911f404,
0x012ec3e7,
0xfd0134b6,
@@ -1436,24 +1433,24 @@ uint32_t nva3_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6075621,
-/* 0x07f6: i2c_addr_done */
+ 0xb6074a21,
+/* 0x07ea: i2c_addr_done */
0x00f80464,
-/* 0x07f8: i2c_acquire_addr */
+/* 0x07ec: i2c_acquire_addr */
0xb6f8cec7,
0xe0b702e4,
0xee980bfc,
-/* 0x0807: i2c_acquire */
+/* 0x07fb: i2c_acquire */
0xf500f800,
- 0xf407f821,
+ 0xf407ec21,
0xd9f00421,
0x3f21f403,
-/* 0x0816: i2c_release */
+/* 0x080a: i2c_release */
0x21f500f8,
- 0x21f407f8,
+ 0x21f407ec,
0x03daf004,
0xf83f21f4,
-/* 0x0825: i2c_recv */
+/* 0x0819: i2c_recv */
0x0132f400,
0xb6f8c1c7,
0x16b00214,
@@ -1472,7 +1469,7 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x080721f5,
+ 0x07fb21f5,
0xfc0464b6,
0x00d6b0d0,
0x00b31bf5,
@@ -1482,7 +1479,7 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x07b121f5,
+ 0x07a521f5,
0xf50464b6,
0xc700d011,
0x76bbe0c5,
@@ -1491,7 +1488,7 @@ uint32_t nva3_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6075621,
+ 0xb6074a21,
0x11f50464,
0x57f000ad,
0x0076bb01,
@@ -1500,7 +1497,7 @@ uint32_t nva3_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b607b1,
+ 0x64b607a5,
0x8a11f504,
0x0076bb00,
0xf90465b6,
@@ -1508,7 +1505,7 @@ uint32_t nva3_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60704,
+ 0x64b606f8,
0x6a11f404,
0xbbe05bcb,
0x65b60076,
@@ -1516,38 +1513,38 @@ uint32_t nva3_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x064921f5,
+ 0x063d21f5,
0xb90464b6,
0x74bd025b,
-/* 0x092b: i2c_recv_not_rd08 */
+/* 0x091f: i2c_recv_not_rd08 */
0xb0430ef4,
0x1bf401d6,
0x0057f03d,
- 0x07b121f5,
+ 0x07a521f5,
0xc73311f4,
0x21f5e0c5,
- 0x11f40756,
+ 0x11f4074a,
0x0057f029,
- 0x07b121f5,
+ 0x07a521f5,
0xc71f11f4,
0x21f5e0b5,
- 0x11f40756,
- 0x4921f515,
+ 0x11f4074a,
+ 0x3d21f515,
0xc774bd06,
0x1bf408c5,
0x0232f409,
-/* 0x096b: i2c_recv_not_wr08 */
-/* 0x096b: i2c_recv_done */
+/* 0x095f: i2c_recv_not_wr08 */
+/* 0x095f: i2c_recv_done */
0xc7030ef4,
0x21f5f8ce,
- 0xe0fc0816,
+ 0xe0fc080a,
0x12f4d0fc,
0x027cb90a,
0x02b921f5,
-/* 0x0980: i2c_recv_exit */
-/* 0x0982: i2c_init */
+/* 0x0974: i2c_recv_exit */
+/* 0x0976: i2c_init */
0x00f800f8,
-/* 0x0984: test_recv */
+/* 0x0978: test_recv */
0x05d817f1,
0xcf0614b6,
0x10b60011,
@@ -1557,12 +1554,12 @@ uint32_t nva3_pwr_code[] = {
0x00e7f104,
0x4fe3f1d9,
0xf521f513,
-/* 0x09ab: test_init */
+/* 0x099f: test_init */
0xf100f801,
0xf50800e7,
0xf801f521,
-/* 0x09b5: idle_recv */
-/* 0x09b7: idle */
+/* 0x09a9: idle_recv */
+/* 0x09ab: idle */
0xf400f800,
0x17f10031,
0x14b605d4,
@@ -1570,20 +1567,23 @@ uint32_t nva3_pwr_code[] = {
0xf10110b6,
0xb605d407,
0x01d00604,
-/* 0x09d3: idle_loop */
+/* 0x09c7: idle_loop */
0xf004bd00,
0x32f45817,
-/* 0x09d9: idle_proc */
-/* 0x09d9: idle_proc_exec */
+/* 0x09cd: idle_proc */
+/* 0x09cd: idle_proc_exec */
0xb910f902,
0x21f5021e,
0x10fc02c2,
0xf40911f4,
0x0ef40231,
-/* 0x09ed: idle_proc_next */
+/* 0x09e1: idle_proc_next */
0x5810b6ef,
0xf4061fb8,
0x02f4e61b,
0x0028f4dd,
0x00bb0ef4,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index 7ac87405d01b..0773ff0e3dc3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000054e,
- 0x00000540,
+ 0x00000542,
+ 0x00000534,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x00000552,
- 0x00000550,
+ 0x00000546,
+ 0x00000544,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000982,
- 0x00000825,
+ 0x00000976,
+ 0x00000819,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x000009ab,
- 0x00000984,
+ 0x0000099f,
+ 0x00000978,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x000009b7,
- 0x000009b5,
+ 0x000009ab,
+ 0x000009a9,
0x00000000,
0x00000000,
0x00000000,
@@ -239,10 +239,10 @@ uint32_t nvc0_pwr_data[] = {
0x000004b7,
0x00040003,
0x00000000,
- 0x000004df,
+ 0x000004d3,
0x00010004,
0x00000000,
- 0x000004fc,
+ 0x000004f0,
/* 0x03ac: memx_func_tail */
/* 0x03ac: memx_data_head */
0x00000000,
@@ -1198,13 +1198,10 @@ uint32_t nvc0_pwr_code[] = {
0x0810b601,
0x50f960f9,
0xe0fcd0fc,
- 0xf13f21f4,
- 0xfd140003,
- 0x05800506,
- 0xb604bd00,
+ 0xb63f21f4,
0x1bf40242,
-/* 0x04df: memx_func_wait */
- 0xf000f8dd,
+/* 0x04d3: memx_func_wait */
+ 0xf000f8e9,
0x84b62c87,
0x0088cf06,
0x98001e98,
@@ -1212,14 +1209,14 @@ uint32_t nvc0_pwr_code[] = {
0x031b9802,
0xf41010b6,
0x00f89c21,
-/* 0x04fc: memx_func_delay */
+/* 0x04f0: memx_func_delay */
0xb6001e98,
0x21f40410,
-/* 0x0507: memx_exec */
+/* 0x04fb: memx_exec */
0xf900f87f,
0xb9d0f9e0,
0xb2b902c1,
-/* 0x0511: memx_exec_next */
+/* 0x0505: memx_exec_next */
0x00139802,
0x950410b6,
0x30f01034,
@@ -1228,112 +1225,112 @@ uint32_t nvc0_pwr_code[] = {
0xec1ef406,
0xe0fcd0fc,
0x02b921f5,
-/* 0x0532: memx_info */
+/* 0x0526: memx_info */
0xc7f100f8,
0xb7f103ac,
0x21f50800,
0x00f802b9,
-/* 0x0540: memx_recv */
+/* 0x0534: memx_recv */
0xf401d6b0,
0xd6b0c40b,
0xe90bf400,
-/* 0x054e: memx_init */
+/* 0x0542: memx_init */
0x00f800f8,
-/* 0x0550: perf_recv */
-/* 0x0552: perf_init */
+/* 0x0544: perf_recv */
+/* 0x0546: perf_init */
0x00f800f8,
-/* 0x0554: i2c_drive_scl */
+/* 0x0548: i2c_drive_scl */
0xf40036b0,
0x07f1110b,
0x04b607e0,
0x0001d006,
0x00f804bd,
-/* 0x0568: i2c_drive_scl_lo */
+/* 0x055c: i2c_drive_scl_lo */
0x07e407f1,
0xd00604b6,
0x04bd0001,
-/* 0x0576: i2c_drive_sda */
+/* 0x056a: i2c_drive_sda */
0x36b000f8,
0x110bf400,
0x07e007f1,
0xd00604b6,
0x04bd0002,
-/* 0x058a: i2c_drive_sda_lo */
+/* 0x057e: i2c_drive_sda_lo */
0x07f100f8,
0x04b607e4,
0x0002d006,
0x00f804bd,
-/* 0x0598: i2c_sense_scl */
+/* 0x058c: i2c_sense_scl */
0xf10132f4,
0xb607c437,
0x33cf0634,
0x0431fd00,
0xf4060bf4,
-/* 0x05ae: i2c_sense_scl_done */
+/* 0x05a2: i2c_sense_scl_done */
0x00f80131,
-/* 0x05b0: i2c_sense_sda */
+/* 0x05a4: i2c_sense_sda */
0xf10132f4,
0xb607c437,
0x33cf0634,
0x0432fd00,
0xf4060bf4,
-/* 0x05c6: i2c_sense_sda_done */
+/* 0x05ba: i2c_sense_sda_done */
0x00f80131,
-/* 0x05c8: i2c_raise_scl */
+/* 0x05bc: i2c_raise_scl */
0x47f140f9,
0x37f00898,
- 0x5421f501,
-/* 0x05d5: i2c_raise_scl_wait */
+ 0x4821f501,
+/* 0x05c9: i2c_raise_scl_wait */
0xe8e7f105,
0x7f21f403,
- 0x059821f5,
+ 0x058c21f5,
0xb60901f4,
0x1bf40142,
-/* 0x05e9: i2c_raise_scl_done */
+/* 0x05dd: i2c_raise_scl_done */
0xf840fcef,
-/* 0x05ed: i2c_start */
- 0x9821f500,
+/* 0x05e1: i2c_start */
+ 0x8c21f500,
0x0d11f405,
- 0x05b021f5,
+ 0x05a421f5,
0xf40611f4,
-/* 0x05fe: i2c_start_rep */
+/* 0x05f2: i2c_start_rep */
0x37f0300e,
- 0x5421f500,
+ 0x4821f500,
0x0137f005,
- 0x057621f5,
+ 0x056a21f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xc821f550,
+ 0xbc21f550,
0x0464b605,
-/* 0x062b: i2c_start_send */
+/* 0x061f: i2c_start_send */
0xf01f11f4,
0x21f50037,
- 0xe7f10576,
+ 0xe7f1056a,
0x21f41388,
0x0037f07f,
- 0x055421f5,
+ 0x054821f5,
0x1388e7f1,
-/* 0x0647: i2c_start_out */
+/* 0x063b: i2c_start_out */
0xf87f21f4,
-/* 0x0649: i2c_stop */
+/* 0x063d: i2c_stop */
0x0037f000,
- 0x055421f5,
+ 0x054821f5,
0xf50037f0,
- 0xf1057621,
+ 0xf1056a21,
0xf403e8e7,
0x37f07f21,
- 0x5421f501,
+ 0x4821f501,
0x88e7f105,
0x7f21f413,
0xf50137f0,
- 0xf1057621,
+ 0xf1056a21,
0xf41388e7,
0x00f87f21,
-/* 0x067c: i2c_bitw */
- 0x057621f5,
+/* 0x0670: i2c_bitw */
+ 0x056a21f5,
0x03e8e7f1,
0xbb7f21f4,
0x65b60076,
@@ -1341,18 +1338,18 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05c821f5,
+ 0x05bc21f5,
0xf40464b6,
0xe7f11811,
0x21f41388,
0x0037f07f,
- 0x055421f5,
+ 0x054821f5,
0x1388e7f1,
-/* 0x06bb: i2c_bitw_out */
+/* 0x06af: i2c_bitw_out */
0xf87f21f4,
-/* 0x06bd: i2c_bitr */
+/* 0x06b1: i2c_bitr */
0x0137f000,
- 0x057621f5,
+ 0x056a21f5,
0x03e8e7f1,
0xbb7f21f4,
0x65b60076,
@@ -1360,19 +1357,19 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05c821f5,
+ 0x05bc21f5,
0xf40464b6,
0x21f51b11,
- 0x37f005b0,
- 0x5421f500,
+ 0x37f005a4,
+ 0x4821f500,
0x88e7f105,
0x7f21f413,
0xf4013cf0,
-/* 0x0702: i2c_bitr_done */
+/* 0x06f6: i2c_bitr_done */
0x00f80131,
-/* 0x0704: i2c_get_byte */
+/* 0x06f8: i2c_get_byte */
0xf00057f0,
-/* 0x070a: i2c_get_byte_next */
+/* 0x06fe: i2c_get_byte_next */
0x54b60847,
0x0076bb01,
0xf90465b6,
@@ -1380,7 +1377,7 @@ uint32_t nvc0_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b606bd,
+ 0x64b606b1,
0x2b11f404,
0xb60553fd,
0x1bf40142,
@@ -1390,12 +1387,12 @@ uint32_t nvc0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x7c21f550,
+ 0x7021f550,
0x0464b606,
-/* 0x0754: i2c_get_byte_done */
-/* 0x0756: i2c_put_byte */
+/* 0x0748: i2c_get_byte_done */
+/* 0x074a: i2c_put_byte */
0x47f000f8,
-/* 0x0759: i2c_put_byte_next */
+/* 0x074d: i2c_put_byte_next */
0x0142b608,
0xbb3854ff,
0x65b60076,
@@ -1403,7 +1400,7 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x067c21f5,
+ 0x067021f5,
0xf40464b6,
0x46b03411,
0xd81bf400,
@@ -1412,21 +1409,21 @@ uint32_t nvc0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xbd21f550,
+ 0xb121f550,
0x0464b606,
0xbb0f11f4,
0x36b00076,
0x061bf401,
-/* 0x07af: i2c_put_byte_done */
+/* 0x07a3: i2c_put_byte_done */
0xf80132f4,
-/* 0x07b1: i2c_addr */
+/* 0x07a5: i2c_addr */
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b605ed,
+ 0x64b605e1,
0x2911f404,
0x012ec3e7,
0xfd0134b6,
@@ -1436,24 +1433,24 @@ uint32_t nvc0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6075621,
-/* 0x07f6: i2c_addr_done */
+ 0xb6074a21,
+/* 0x07ea: i2c_addr_done */
0x00f80464,
-/* 0x07f8: i2c_acquire_addr */
+/* 0x07ec: i2c_acquire_addr */
0xb6f8cec7,
0xe0b702e4,
0xee980bfc,
-/* 0x0807: i2c_acquire */
+/* 0x07fb: i2c_acquire */
0xf500f800,
- 0xf407f821,
+ 0xf407ec21,
0xd9f00421,
0x3f21f403,
-/* 0x0816: i2c_release */
+/* 0x080a: i2c_release */
0x21f500f8,
- 0x21f407f8,
+ 0x21f407ec,
0x03daf004,
0xf83f21f4,
-/* 0x0825: i2c_recv */
+/* 0x0819: i2c_recv */
0x0132f400,
0xb6f8c1c7,
0x16b00214,
@@ -1472,7 +1469,7 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x080721f5,
+ 0x07fb21f5,
0xfc0464b6,
0x00d6b0d0,
0x00b31bf5,
@@ -1482,7 +1479,7 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x07b121f5,
+ 0x07a521f5,
0xf50464b6,
0xc700d011,
0x76bbe0c5,
@@ -1491,7 +1488,7 @@ uint32_t nvc0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6075621,
+ 0xb6074a21,
0x11f50464,
0x57f000ad,
0x0076bb01,
@@ -1500,7 +1497,7 @@ uint32_t nvc0_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b607b1,
+ 0x64b607a5,
0x8a11f504,
0x0076bb00,
0xf90465b6,
@@ -1508,7 +1505,7 @@ uint32_t nvc0_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60704,
+ 0x64b606f8,
0x6a11f404,
0xbbe05bcb,
0x65b60076,
@@ -1516,38 +1513,38 @@ uint32_t nvc0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x064921f5,
+ 0x063d21f5,
0xb90464b6,
0x74bd025b,
-/* 0x092b: i2c_recv_not_rd08 */
+/* 0x091f: i2c_recv_not_rd08 */
0xb0430ef4,
0x1bf401d6,
0x0057f03d,
- 0x07b121f5,
+ 0x07a521f5,
0xc73311f4,
0x21f5e0c5,
- 0x11f40756,
+ 0x11f4074a,
0x0057f029,
- 0x07b121f5,
+ 0x07a521f5,
0xc71f11f4,
0x21f5e0b5,
- 0x11f40756,
- 0x4921f515,
+ 0x11f4074a,
+ 0x3d21f515,
0xc774bd06,
0x1bf408c5,
0x0232f409,
-/* 0x096b: i2c_recv_not_wr08 */
-/* 0x096b: i2c_recv_done */
+/* 0x095f: i2c_recv_not_wr08 */
+/* 0x095f: i2c_recv_done */
0xc7030ef4,
0x21f5f8ce,
- 0xe0fc0816,
+ 0xe0fc080a,
0x12f4d0fc,
0x027cb90a,
0x02b921f5,
-/* 0x0980: i2c_recv_exit */
-/* 0x0982: i2c_init */
+/* 0x0974: i2c_recv_exit */
+/* 0x0976: i2c_init */
0x00f800f8,
-/* 0x0984: test_recv */
+/* 0x0978: test_recv */
0x05d817f1,
0xcf0614b6,
0x10b60011,
@@ -1557,12 +1554,12 @@ uint32_t nvc0_pwr_code[] = {
0x00e7f104,
0x4fe3f1d9,
0xf521f513,
-/* 0x09ab: test_init */
+/* 0x099f: test_init */
0xf100f801,
0xf50800e7,
0xf801f521,
-/* 0x09b5: idle_recv */
-/* 0x09b7: idle */
+/* 0x09a9: idle_recv */
+/* 0x09ab: idle */
0xf400f800,
0x17f10031,
0x14b605d4,
@@ -1570,20 +1567,23 @@ uint32_t nvc0_pwr_code[] = {
0xf10110b6,
0xb605d407,
0x01d00604,
-/* 0x09d3: idle_loop */
+/* 0x09c7: idle_loop */
0xf004bd00,
0x32f45817,
-/* 0x09d9: idle_proc */
-/* 0x09d9: idle_proc_exec */
+/* 0x09cd: idle_proc */
+/* 0x09cd: idle_proc_exec */
0xb910f902,
0x21f5021e,
0x10fc02c2,
0xf40911f4,
0x0ef40231,
-/* 0x09ed: idle_proc_next */
+/* 0x09e1: idle_proc_next */
0x5810b6ef,
0xf4061fb8,
0x02f4e61b,
0x0028f4dd,
0x00bb0ef4,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index cd9ff1a73284..8d369b3faaba 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x000004c4,
- 0x000004b6,
+ 0x000004b8,
+ 0x000004aa,
0x00000000,
0x00000000,
0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x000004c8,
- 0x000004c6,
+ 0x000004bc,
+ 0x000004ba,
0x00000000,
0x00000000,
0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x000008e3,
- 0x00000786,
+ 0x000008d7,
+ 0x0000077a,
0x00000000,
0x00000000,
0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000906,
- 0x000008e5,
+ 0x000008fa,
+ 0x000008d9,
0x00000000,
0x00000000,
0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000912,
- 0x00000910,
+ 0x00000906,
+ 0x00000904,
0x00000000,
0x00000000,
0x00000000,
@@ -239,10 +239,10 @@ uint32_t nvd0_pwr_data[] = {
0x00000430,
0x00040003,
0x00000000,
- 0x00000458,
+ 0x0000044c,
0x00010004,
0x00000000,
- 0x00000472,
+ 0x00000466,
/* 0x03ac: memx_func_tail */
/* 0x03ac: memx_data_head */
0x00000000,
@@ -1100,26 +1100,23 @@ uint32_t nvd0_pwr_code[] = {
0xf960f908,
0xfcd0fc50,
0x3321f4e0,
- 0x140003f1,
- 0x800506fd,
- 0x04bd0005,
0xf40242b6,
- 0x00f8dd1b,
-/* 0x0458: memx_func_wait */
+ 0x00f8e91b,
+/* 0x044c: memx_func_wait */
0xcf2c87f0,
0x1e980088,
0x011d9800,
0x98021c98,
0x10b6031b,
0x7e21f410,
-/* 0x0472: memx_func_delay */
+/* 0x0466: memx_func_delay */
0x1e9800f8,
0x0410b600,
0xf86721f4,
-/* 0x047d: memx_exec */
+/* 0x0471: memx_exec */
0xf9e0f900,
0x02c1b9d0,
-/* 0x0487: memx_exec_next */
+/* 0x047b: memx_exec_next */
0x9802b2b9,
0x10b60013,
0x10349504,
@@ -1129,107 +1126,107 @@ uint32_t nvd0_pwr_code[] = {
0xd0fcec1e,
0x21f5e0fc,
0x00f8026b,
-/* 0x04a8: memx_info */
+/* 0x049c: memx_info */
0x03acc7f1,
0x0800b7f1,
0x026b21f5,
-/* 0x04b6: memx_recv */
+/* 0x04aa: memx_recv */
0xd6b000f8,
0xc40bf401,
0xf400d6b0,
0x00f8e90b,
-/* 0x04c4: memx_init */
-/* 0x04c6: perf_recv */
+/* 0x04b8: memx_init */
+/* 0x04ba: perf_recv */
0x00f800f8,
-/* 0x04c8: perf_init */
-/* 0x04ca: i2c_drive_scl */
+/* 0x04bc: perf_init */
+/* 0x04be: i2c_drive_scl */
0x36b000f8,
0x0e0bf400,
0x07e007f1,
0xbd0001d0,
-/* 0x04db: i2c_drive_scl_lo */
+/* 0x04cf: i2c_drive_scl_lo */
0xf100f804,
0xd007e407,
0x04bd0001,
-/* 0x04e6: i2c_drive_sda */
+/* 0x04da: i2c_drive_sda */
0x36b000f8,
0x0e0bf400,
0x07e007f1,
0xbd0002d0,
-/* 0x04f7: i2c_drive_sda_lo */
+/* 0x04eb: i2c_drive_sda_lo */
0xf100f804,
0xd007e407,
0x04bd0002,
-/* 0x0502: i2c_sense_scl */
+/* 0x04f6: i2c_sense_scl */
0x32f400f8,
0xc437f101,
0x0033cf07,
0xf40431fd,
0x31f4060b,
-/* 0x0515: i2c_sense_scl_done */
-/* 0x0517: i2c_sense_sda */
+/* 0x0509: i2c_sense_scl_done */
+/* 0x050b: i2c_sense_sda */
0xf400f801,
0x37f10132,
0x33cf07c4,
0x0432fd00,
0xf4060bf4,
-/* 0x052a: i2c_sense_sda_done */
+/* 0x051e: i2c_sense_sda_done */
0x00f80131,
-/* 0x052c: i2c_raise_scl */
+/* 0x0520: i2c_raise_scl */
0x47f140f9,
0x37f00898,
- 0xca21f501,
-/* 0x0539: i2c_raise_scl_wait */
+ 0xbe21f501,
+/* 0x052d: i2c_raise_scl_wait */
0xe8e7f104,
0x6721f403,
- 0x050221f5,
+ 0x04f621f5,
0xb60901f4,
0x1bf40142,
-/* 0x054d: i2c_raise_scl_done */
+/* 0x0541: i2c_raise_scl_done */
0xf840fcef,
-/* 0x0551: i2c_start */
- 0x0221f500,
- 0x0d11f405,
- 0x051721f5,
+/* 0x0545: i2c_start */
+ 0xf621f500,
+ 0x0d11f404,
+ 0x050b21f5,
0xf40611f4,
-/* 0x0562: i2c_start_rep */
+/* 0x0556: i2c_start_rep */
0x37f0300e,
- 0xca21f500,
+ 0xbe21f500,
0x0137f004,
- 0x04e621f5,
+ 0x04da21f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x2c21f550,
+ 0x2021f550,
0x0464b605,
-/* 0x058f: i2c_start_send */
+/* 0x0583: i2c_start_send */
0xf01f11f4,
0x21f50037,
- 0xe7f104e6,
+ 0xe7f104da,
0x21f41388,
0x0037f067,
- 0x04ca21f5,
+ 0x04be21f5,
0x1388e7f1,
-/* 0x05ab: i2c_start_out */
+/* 0x059f: i2c_start_out */
0xf86721f4,
-/* 0x05ad: i2c_stop */
+/* 0x05a1: i2c_stop */
0x0037f000,
- 0x04ca21f5,
+ 0x04be21f5,
0xf50037f0,
- 0xf104e621,
+ 0xf104da21,
0xf403e8e7,
0x37f06721,
- 0xca21f501,
+ 0xbe21f501,
0x88e7f104,
0x6721f413,
0xf50137f0,
- 0xf104e621,
+ 0xf104da21,
0xf41388e7,
0x00f86721,
-/* 0x05e0: i2c_bitw */
- 0x04e621f5,
+/* 0x05d4: i2c_bitw */
+ 0x04da21f5,
0x03e8e7f1,
0xbb6721f4,
0x65b60076,
@@ -1237,18 +1234,18 @@ uint32_t nvd0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x052c21f5,
+ 0x052021f5,
0xf40464b6,
0xe7f11811,
0x21f41388,
0x0037f067,
- 0x04ca21f5,
+ 0x04be21f5,
0x1388e7f1,
-/* 0x061f: i2c_bitw_out */
+/* 0x0613: i2c_bitw_out */
0xf86721f4,
-/* 0x0621: i2c_bitr */
+/* 0x0615: i2c_bitr */
0x0137f000,
- 0x04e621f5,
+ 0x04da21f5,
0x03e8e7f1,
0xbb6721f4,
0x65b60076,
@@ -1256,19 +1253,19 @@ uint32_t nvd0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x052c21f5,
+ 0x052021f5,
0xf40464b6,
0x21f51b11,
- 0x37f00517,
- 0xca21f500,
+ 0x37f0050b,
+ 0xbe21f500,
0x88e7f104,
0x6721f413,
0xf4013cf0,
-/* 0x0666: i2c_bitr_done */
+/* 0x065a: i2c_bitr_done */
0x00f80131,
-/* 0x0668: i2c_get_byte */
+/* 0x065c: i2c_get_byte */
0xf00057f0,
-/* 0x066e: i2c_get_byte_next */
+/* 0x0662: i2c_get_byte_next */
0x54b60847,
0x0076bb01,
0xf90465b6,
@@ -1276,7 +1273,7 @@ uint32_t nvd0_pwr_code[] = {
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60621,
+ 0x64b60615,
0x2b11f404,
0xb60553fd,
0x1bf40142,
@@ -1286,12 +1283,12 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xe021f550,
+ 0xd421f550,
0x0464b605,
-/* 0x06b8: i2c_get_byte_done */
-/* 0x06ba: i2c_put_byte */
+/* 0x06ac: i2c_get_byte_done */
+/* 0x06ae: i2c_put_byte */
0x47f000f8,
-/* 0x06bd: i2c_put_byte_next */
+/* 0x06b1: i2c_put_byte_next */
0x0142b608,
0xbb3854ff,
0x65b60076,
@@ -1299,7 +1296,7 @@ uint32_t nvd0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x05e021f5,
+ 0x05d421f5,
0xf40464b6,
0x46b03411,
0xd81bf400,
@@ -1308,21 +1305,21 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x2121f550,
+ 0x1521f550,
0x0464b606,
0xbb0f11f4,
0x36b00076,
0x061bf401,
-/* 0x0713: i2c_put_byte_done */
+/* 0x0707: i2c_put_byte_done */
0xf80132f4,
-/* 0x0715: i2c_addr */
+/* 0x0709: i2c_addr */
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b60551,
+ 0x64b60545,
0x2911f404,
0x012ec3e7,
0xfd0134b6,
@@ -1332,23 +1329,23 @@ uint32_t nvd0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb606ba21,
-/* 0x075a: i2c_addr_done */
+ 0xb606ae21,
+/* 0x074e: i2c_addr_done */
0x00f80464,
-/* 0x075c: i2c_acquire_addr */
+/* 0x0750: i2c_acquire_addr */
0xb6f8cec7,
0xe0b705e4,
0x00f8d014,
-/* 0x0768: i2c_acquire */
- 0x075c21f5,
+/* 0x075c: i2c_acquire */
+ 0x075021f5,
0xf00421f4,
0x21f403d9,
-/* 0x0777: i2c_release */
+/* 0x076b: i2c_release */
0xf500f833,
- 0xf4075c21,
+ 0xf4075021,
0xdaf00421,
0x3321f403,
-/* 0x0786: i2c_recv */
+/* 0x077a: i2c_recv */
0x32f400f8,
0xf8c1c701,
0xb00214b6,
@@ -1367,7 +1364,7 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x6821f550,
+ 0x5c21f550,
0x0464b607,
0xd6b0d0fc,
0xb31bf500,
@@ -1377,7 +1374,7 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x1521f550,
+ 0x0921f550,
0x0464b607,
0x00d011f5,
0xbbe0c5c7,
@@ -1386,7 +1383,7 @@ uint32_t nvd0_pwr_code[] = {
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x06ba21f5,
+ 0x06ae21f5,
0xf50464b6,
0xf000ad11,
0x76bb0157,
@@ -1395,7 +1392,7 @@ uint32_t nvd0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6071521,
+ 0xb6070921,
0x11f50464,
0x76bb008a,
0x0465b600,
@@ -1403,7 +1400,7 @@ uint32_t nvd0_pwr_code[] = {
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6066821,
+ 0xb6065c21,
0x11f40464,
0xe05bcb6a,
0xb60076bb,
@@ -1411,38 +1408,38 @@ uint32_t nvd0_pwr_code[] = {
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xad21f550,
+ 0xa121f550,
0x0464b605,
0xbd025bb9,
0x430ef474,
-/* 0x088c: i2c_recv_not_rd08 */
+/* 0x0880: i2c_recv_not_rd08 */
0xf401d6b0,
0x57f03d1b,
- 0x1521f500,
+ 0x0921f500,
0x3311f407,
0xf5e0c5c7,
- 0xf406ba21,
+ 0xf406ae21,
0x57f02911,
- 0x1521f500,
+ 0x0921f500,
0x1f11f407,
0xf5e0b5c7,
- 0xf406ba21,
+ 0xf406ae21,
0x21f51511,
- 0x74bd05ad,
+ 0x74bd05a1,
0xf408c5c7,
0x32f4091b,
0x030ef402,
-/* 0x08cc: i2c_recv_not_wr08 */
-/* 0x08cc: i2c_recv_done */
+/* 0x08c0: i2c_recv_not_wr08 */
+/* 0x08c0: i2c_recv_done */
0xf5f8cec7,
- 0xfc077721,
+ 0xfc076b21,
0xf4d0fce0,
0x7cb90a12,
0x6b21f502,
-/* 0x08e1: i2c_recv_exit */
-/* 0x08e3: i2c_init */
+/* 0x08d5: i2c_recv_exit */
+/* 0x08d7: i2c_init */
0xf800f802,
-/* 0x08e5: test_recv */
+/* 0x08d9: test_recv */
0xd817f100,
0x0011cf05,
0xf10110b6,
@@ -1451,28 +1448,28 @@ uint32_t nvd0_pwr_code[] = {
0xd900e7f1,
0x134fe3f1,
0x01b621f5,
-/* 0x0906: test_init */
+/* 0x08fa: test_init */
0xe7f100f8,
0x21f50800,
0x00f801b6,
-/* 0x0910: idle_recv */
-/* 0x0912: idle */
+/* 0x0904: idle_recv */
+/* 0x0906: idle */
0x31f400f8,
0xd417f100,
0x0011cf05,
0xf10110b6,
0xd005d407,
0x04bd0001,
-/* 0x0928: idle_loop */
+/* 0x091c: idle_loop */
0xf45817f0,
-/* 0x092e: idle_proc */
-/* 0x092e: idle_proc_exec */
+/* 0x0922: idle_proc */
+/* 0x0922: idle_proc_exec */
0x10f90232,
0xf5021eb9,
0xfc027421,
0x0911f410,
0xf40231f4,
-/* 0x0942: idle_proc_next */
+/* 0x0936: idle_proc_next */
0x10b6ef0e,
0x061fb858,
0xf4e61bf4,
@@ -1521,4 +1518,7 @@ uint32_t nvd0_pwr_code[] = {
0x00000000,
0x00000000,
0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c
new file mode 100644
index 000000000000..d76612999b9f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/gk104.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "priv.h"
+
+#define nvd0_pwr_code gk104_pwr_code
+#define nvd0_pwr_data gk104_pwr_data
+#include "fuc/nvd0.fuc.h"
+
+static void
+gk104_pwr_pgob(struct nouveau_pwr *ppwr, bool enable)
+{
+ nv_mask(ppwr, 0x000200, 0x00001000, 0x00000000);
+ nv_rd32(ppwr, 0x000200);
+ nv_mask(ppwr, 0x000200, 0x08000000, 0x08000000);
+ msleep(50);
+
+ nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000002);
+ nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000);
+
+ nv_mask(ppwr, 0x020004, 0xc0000000, enable ? 0xc0000000 : 0x40000000);
+ msleep(50);
+
+ nv_mask(ppwr, 0x10a78c, 0x00000002, 0x00000000);
+ nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000001);
+ nv_mask(ppwr, 0x10a78c, 0x00000001, 0x00000000);
+
+ nv_mask(ppwr, 0x000200, 0x08000000, 0x00000000);
+ nv_mask(ppwr, 0x000200, 0x00001000, 0x00001000);
+ nv_rd32(ppwr, 0x000200);
+}
+
+struct nouveau_oclass *
+gk104_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0xe4),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
+ .dtor = _nouveau_pwr_dtor,
+ .init = _nouveau_pwr_init,
+ .fini = _nouveau_pwr_fini,
+ },
+ .code.data = gk104_pwr_code,
+ .code.size = sizeof(gk104_pwr_code),
+ .data.data = gk104_pwr_data,
+ .data.size = sizeof(gk104_pwr_data),
+ .pgob = gk104_pwr_pgob,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
index 03de3107d29f..def6a9ac68cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -1,8 +1,7 @@
#ifndef __NVKM_PWR_MEMX_H__
#define __NVKM_PWR_MEMX_H__
-#include <subdev/pwr.h>
-#include <subdev/pwr/fuc/os.h>
+#include "priv.h"
struct nouveau_memx {
struct nouveau_pwr *ppwr;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
index 52c85414866a..04ff7c3c34e9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nv108.c
@@ -22,41 +22,20 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
-
+#include "priv.h"
#include "fuc/nv108.fuc.h"
-struct nv108_pwr_priv {
- struct nouveau_pwr base;
-};
-
-static int
-nv108_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nv108_pwr_priv *priv;
- int ret;
-
- ret = nouveau_pwr_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.code.data = nv108_pwr_code;
- priv->base.code.size = sizeof(nv108_pwr_code);
- priv->base.data.data = nv108_pwr_data;
- priv->base.data.size = sizeof(nv108_pwr_data);
- return 0;
-}
-
-struct nouveau_oclass
-nv108_pwr_oclass = {
- .handle = NV_SUBDEV(PWR, 0x00),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nv108_pwr_ctor,
+struct nouveau_oclass *
+nv108_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0x00),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
.dtor = _nouveau_pwr_dtor,
.init = _nouveau_pwr_init,
.fini = _nouveau_pwr_fini,
},
-};
+ .code.data = nv108_pwr_code,
+ .code.size = sizeof(nv108_pwr_code),
+ .data.data = nv108_pwr_data,
+ .data.size = sizeof(nv108_pwr_data),
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
index c132b7ca9747..998d53076b8b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nva3.c
@@ -22,50 +22,29 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
-
+#include "priv.h"
#include "fuc/nva3.fuc.h"
-struct nva3_pwr_priv {
- struct nouveau_pwr base;
-};
-
static int
nva3_pwr_init(struct nouveau_object *object)
{
- struct nva3_pwr_priv *priv = (void *)object;
- nv_mask(priv, 0x022210, 0x00000001, 0x00000000);
- nv_mask(priv, 0x022210, 0x00000001, 0x00000001);
- return nouveau_pwr_init(&priv->base);
-}
-
-static int
-nva3_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nva3_pwr_priv *priv;
- int ret;
-
- ret = nouveau_pwr_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.code.data = nva3_pwr_code;
- priv->base.code.size = sizeof(nva3_pwr_code);
- priv->base.data.data = nva3_pwr_data;
- priv->base.data.size = sizeof(nva3_pwr_data);
- return 0;
+ struct nouveau_pwr *ppwr = (void *)object;
+ nv_mask(ppwr, 0x022210, 0x00000001, 0x00000000);
+ nv_mask(ppwr, 0x022210, 0x00000001, 0x00000001);
+ return nouveau_pwr_init(ppwr);
}
-struct nouveau_oclass
-nva3_pwr_oclass = {
- .handle = NV_SUBDEV(PWR, 0xa3),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nva3_pwr_ctor,
+struct nouveau_oclass *
+nva3_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0xa3),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
.dtor = _nouveau_pwr_dtor,
.init = nva3_pwr_init,
.fini = _nouveau_pwr_fini,
},
-};
+ .code.data = nva3_pwr_code,
+ .code.size = sizeof(nva3_pwr_code),
+ .data.data = nva3_pwr_data,
+ .data.size = sizeof(nva3_pwr_data),
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
index 495f6857428d..9a773e66efa4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvc0.c
@@ -22,41 +22,20 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
-
+#include "priv.h"
#include "fuc/nvc0.fuc.h"
-struct nvc0_pwr_priv {
- struct nouveau_pwr base;
-};
-
-static int
-nvc0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvc0_pwr_priv *priv;
- int ret;
-
- ret = nouveau_pwr_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.code.data = nvc0_pwr_code;
- priv->base.code.size = sizeof(nvc0_pwr_code);
- priv->base.data.data = nvc0_pwr_data;
- priv->base.data.size = sizeof(nvc0_pwr_data);
- return 0;
-}
-
-struct nouveau_oclass
-nvc0_pwr_oclass = {
- .handle = NV_SUBDEV(PWR, 0xc0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvc0_pwr_ctor,
+struct nouveau_oclass *
+nvc0_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0xc0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
.dtor = _nouveau_pwr_dtor,
.init = _nouveau_pwr_init,
.fini = _nouveau_pwr_fini,
},
-};
+ .code.data = nvc0_pwr_code,
+ .code.size = sizeof(nvc0_pwr_code),
+ .data.data = nvc0_pwr_data,
+ .data.size = sizeof(nvc0_pwr_data),
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
index 043aa142fe82..2b29be5d08ac 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/nvd0.c
@@ -22,41 +22,20 @@
* Authors: Ben Skeggs
*/
-#include <subdev/pwr.h>
-
+#include "priv.h"
#include "fuc/nvd0.fuc.h"
-struct nvd0_pwr_priv {
- struct nouveau_pwr base;
-};
-
-static int
-nvd0_pwr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, void *data, u32 size,
- struct nouveau_object **pobject)
-{
- struct nvd0_pwr_priv *priv;
- int ret;
-
- ret = nouveau_pwr_create(parent, engine, oclass, &priv);
- *pobject = nv_object(priv);
- if (ret)
- return ret;
-
- priv->base.code.data = nvd0_pwr_code;
- priv->base.code.size = sizeof(nvd0_pwr_code);
- priv->base.data.data = nvd0_pwr_data;
- priv->base.data.size = sizeof(nvd0_pwr_data);
- return 0;
-}
-
-struct nouveau_oclass
-nvd0_pwr_oclass = {
- .handle = NV_SUBDEV(PWR, 0xd0),
- .ofuncs = &(struct nouveau_ofuncs) {
- .ctor = nvd0_pwr_ctor,
+struct nouveau_oclass *
+nvd0_pwr_oclass = &(struct nvkm_pwr_impl) {
+ .base.handle = NV_SUBDEV(PWR, 0xd0),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = _nouveau_pwr_ctor,
.dtor = _nouveau_pwr_dtor,
.init = _nouveau_pwr_init,
.fini = _nouveau_pwr_fini,
},
-};
+ .code.data = nvd0_pwr_code,
+ .code.size = sizeof(nvd0_pwr_code),
+ .data.data = nvd0_pwr_data,
+ .data.size = sizeof(nvd0_pwr_data),
+}.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h
new file mode 100644
index 000000000000..3814a341db32
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/priv.h
@@ -0,0 +1,44 @@
+#ifndef __NVKM_PWR_PRIV_H__
+#define __NVKM_PWR_PRIV_H__
+
+#include <subdev/pwr.h>
+#include <subdev/pwr/fuc/os.h>
+
+#define nouveau_pwr_create(p, e, o, d) \
+ nouveau_pwr_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_pwr_destroy(p) \
+ nouveau_subdev_destroy(&(p)->base)
+#define nouveau_pwr_init(p) ({ \
+ struct nouveau_pwr *_ppwr = (p); \
+ _nouveau_pwr_init(nv_object(_ppwr)); \
+})
+#define nouveau_pwr_fini(p,s) ({ \
+ struct nouveau_pwr *_ppwr = (p); \
+ _nouveau_pwr_fini(nv_object(_ppwr), (s)); \
+})
+
+int nouveau_pwr_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int, void **);
+
+int _nouveau_pwr_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+#define _nouveau_pwr_dtor _nouveau_subdev_dtor
+int _nouveau_pwr_init(struct nouveau_object *);
+int _nouveau_pwr_fini(struct nouveau_object *, bool);
+
+struct nvkm_pwr_impl {
+ struct nouveau_oclass base;
+ struct {
+ u32 *data;
+ u32 size;
+ } code;
+ struct {
+ u32 *data;
+ u32 size;
+ } data;
+
+ void (*pgob)(struct nouveau_pwr *, bool);
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index 668cf964e4a9..2d0988755530 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -28,7 +28,7 @@
#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>
-#include <subdev/ltcg.h>
+#include <subdev/ltc.h>
#include <subdev/bar.h>
struct nvc0_vmmgr_priv {
@@ -116,12 +116,12 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
pte <<= 3;
if (mem->tag) {
- struct nouveau_ltcg *ltcg =
- nouveau_ltcg(vma->vm->vmm->base.base.parent);
+ struct nouveau_ltc *ltc =
+ nouveau_ltc(vma->vm->vmm->base.base.parent);
u32 tag = mem->tag->offset + (delta >> 17);
phys |= (u64)tag << (32 + 12);
next |= (u64)1 << (32 + 12);
- ltcg->tags_clear(ltcg, tag, cnt);
+ ltc->tags_clear(ltc, tag, cnt);
}
while (cnt--) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c
index 2a15b98b4d2b..c6361422a0b2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/arb.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c
@@ -198,12 +198,12 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
int *burst, int *lwm)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nv_fifo_info fifo_data;
struct nv_sim_state sim_data;
int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
- uint32_t cfg1 = nv_rd32(device, NV04_PFB_CFG1);
+ uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1);
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
@@ -221,13 +221,13 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
sim_data.mem_latency = 3;
sim_data.mem_page_miss = 10;
} else {
- sim_data.memory_type = nv_rd32(device, NV04_PFB_CFG0) & 0x1;
- sim_data.memory_width = (nv_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
+ sim_data.memory_type = nvif_rd32(device, NV04_PFB_CFG0) & 0x1;
+ sim_data.memory_width = (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
sim_data.mem_latency = cfg1 & 0xf;
sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
}
- if (nv_device(drm->device)->card_type == NV_04)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
nv04_calc_arb(&fifo_data, &sim_data);
else
nv10_calc_arb(&fifo_data, &sim_data);
@@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nv_device(drm->device)->card_type < NV_20)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN)
nv04_update_arb(dev, vclk, bpp, burst, lwm);
else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
(dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 41be3424c906..b90aa5c1f90a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -111,8 +111,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bios *bios = nouveau_bios(drm->device);
- struct nouveau_clock *clk = nouveau_clock(drm->device);
+ struct nouveau_bios *bios = nvkm_bios(&drm->device);
+ struct nouveau_clock *clk = nvkm_clock(&drm->device);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
@@ -136,7 +136,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
* has yet been observed in allowing the use a single stage pll on all
* nv43 however. the behaviour of single stage use is untested on nv40
*/
- if (nv_device(drm->device)->chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
+ if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
@@ -146,10 +146,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
/* The blob uses this always, so let's do the same */
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
/* again nv40 and some nv43 act more like nv3x as described above */
- if (nv_device(drm->device)->chipset < 0x41)
+ if (drm->device.info.chipset < 0x41)
state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
@@ -275,7 +275,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
horizEnd = horizTotal - 2;
horizBlankEnd = horizTotal + 4;
#if 0
- if (dev->overlayAdaptor && nv_device(drm->device)->card_type >= NV_10)
+ if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
/* This reportedly works around some video overlay bandwidth problems */
horizTotal += 2;
#endif
@@ -509,7 +509,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
- if (nv_device(drm->device)->chipset >= 0x11)
+ if (drm->device.info.chipset >= 0x11)
regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
@@ -550,26 +550,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
* 1 << 30 on 0x60.830), for no apparent reason */
regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
regp->crtc_830 = mode->crtc_vdisplay - 3;
regp->crtc_834 = mode->crtc_vdisplay - 1;
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
/* This is what the blob does */
regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
- if (nv_device(drm->device)->card_type >= NV_10)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
else
regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
/* Some misc regs */
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
regp->CRTC[NV_CIO_CRE_85] = 0xFF;
regp->CRTC[NV_CIO_CRE_86] = 0x1;
}
@@ -581,7 +581,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
/* Generic PRAMDAC regs */
- if (nv_device(drm->device)->card_type >= NV_10)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
/* Only bit that bios and blob set. */
regp->nv10_cursync = (1 << 25);
@@ -590,7 +590,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
if (crtc->primary->fb->depth == 16)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
- if (nv_device(drm->device)->chipset >= 0x11)
+ if (drm->device.info.chipset >= 0x11)
regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
@@ -653,7 +653,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc_mode_set_vga(crtc, adjusted_mode);
/* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
nv_crtc_mode_set_regs(crtc, adjusted_mode);
nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
@@ -714,7 +714,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
/* Some more preparation. */
NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
}
@@ -888,7 +888,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (nv_device(drm->device)->card_type >= NV_20) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
}
@@ -915,9 +915,9 @@ nv04_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
struct drm_device *dev = drm->dev;
if (state == ENTER_ATOMIC_MODE_SET)
- nouveau_fbcon_save_disable_accel(dev);
+ nouveau_fbcon_accel_save_disable(dev);
else
- nouveau_fbcon_restore_accel(dev);
+ nouveau_fbcon_accel_restore(dev);
return nv04_crtc_do_mode_set_base(crtc, fb, x, y, true);
}
@@ -969,7 +969,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nv_device(drm->device)->chipset == 0x11) {
+ if (drm->device.info.chipset == 0x11) {
pixel = ((pixel & 0x000000ff) << 24) |
((pixel & 0x0000ff00) << 8) |
((pixel & 0x00ff0000) >> 8) |
@@ -1010,7 +1010,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
if (ret)
goto out;
- if (nv_device(drm->device)->chipset >= 0x11)
+ if (drm->device.info.chipset >= 0x11)
nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
else
nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index a810303169de..4e61173c3353 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index a96dda48718e..2d8056cde996 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -65,8 +65,8 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
static int sample_load_twice(struct drm_device *dev, bool sense[2])
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_timer *ptimer = nouveau_timer(device);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nouveau_timer *ptimer = nvkm_timer(device);
int i;
for (i = 0; i < 2; i++) {
@@ -95,15 +95,15 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
udelay(100);
/* when level triggers, sense is _LO_ */
- sense_a = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
+ sense_a = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
/* take another reading until it agrees with sense_a... */
do {
udelay(100);
- sense_b = nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
+ sense_b = nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
if (sense_a != sense_b) {
sense_b_prime =
- nv_rd08(device, NV_PRMCIO_INP0) & 0x10;
+ nvif_rd08(device, NV_PRMCIO_INP0) & 0x10;
if (sense_b == sense_b_prime) {
/* ... unless two consecutive subsequent
* samples agree; sense_a is replaced */
@@ -128,7 +128,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
uint8_t saved_palette0[3], saved_palette_mask;
@@ -164,11 +164,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
- nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
+ nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
for (i = 0; i < 3; i++)
- saved_palette0[i] = nv_rd08(device, NV_PRMDIO_PALETTE_DATA);
- saved_palette_mask = nv_rd08(device, NV_PRMDIO_PIXEL_MASK);
- nv_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
+ saved_palette0[i] = nvif_rd08(device, NV_PRMDIO_PALETTE_DATA);
+ saved_palette_mask = nvif_rd08(device, NV_PRMDIO_PIXEL_MASK);
+ nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, 0);
saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
@@ -181,11 +181,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
do {
bool sense_pair[2];
- nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
+ nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, 0);
/* testing blue won't find monochrome monitors. I don't care */
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, blue);
i = 0;
/* take sample pairs until both samples in the pair agree */
@@ -208,11 +208,11 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
} while (++blue < 0x18 && sense);
out:
- nv_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
+ nvif_wr08(device, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
- nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
+ nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
for (i = 0; i < 3; i++)
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
@@ -231,8 +231,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(device);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ struct nouveau_gpio *gpio = nvkm_gpio(device);
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -256,12 +256,12 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
- saved_powerctrl_2 = nv_rd32(device, NV_PBUS_POWERCTRL_2);
+ saved_powerctrl_2 = nvif_rd32(device, NV_PBUS_POWERCTRL_2);
- nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
if (regoffset == 0x68) {
- saved_powerctrl_4 = nv_rd32(device, NV_PBUS_POWERCTRL_4);
- nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
+ saved_powerctrl_4 = nvif_rd32(device, NV_PBUS_POWERCTRL_4);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
}
if (gpio) {
@@ -283,7 +283,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
routput = (saved_routput & 0xfffffece) | head << 8;
- if (nv_device(drm->device)->card_type >= NV_40) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
if (dcb->type == DCB_OUTPUT_TV)
routput |= 0x1a << 16;
else
@@ -316,8 +316,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
if (regoffset == 0x68)
- nv_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
- nv_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
if (gpio) {
gpio->set(gpio, 0, DCB_GPIO_TVDAC1, 0xff, saved_gpio1);
@@ -398,7 +398,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
}
/* This could use refinement for flatpanels, but it should work this way */
- if (nv_device(drm->device)->chipset < 0x44)
+ if (drm->device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index e57babb206d3..42a5435259f7 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -335,7 +335,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
else /* gpu needs to scale */
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
- if (nv_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
+ if (nvif_rd32(device, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
output_mode->clock > 165000)
@@ -416,7 +416,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
(nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) {
- if (nv_device(drm->device)->chipset == 0x11)
+ if (drm->device.info.chipset == 0x11)
regp->dither = savep->dither | 0x00010000;
else {
int i;
@@ -427,7 +427,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
}
}
} else {
- if (nv_device(drm->device)->chipset != 0x11) {
+ if (drm->device.info.chipset != 0x11) {
/* reset them */
int i;
for (i = 0; i < 3; i++) {
@@ -463,7 +463,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
/* This could use refinement for flatpanels, but it should work this way */
- if (nv_device(drm->device)->chipset < 0x44)
+ if (drm->device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
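The dithering hunks in nv04_dfp_mode_set() above enable dithering either when it is forced on, or in auto mode when the scanout depth exceeds what the panel can display (bpc * 3); only the register programming differs between NV11 and later chips. A small standalone sketch of that decision, with enum names as stand-ins for the kernel's DITHERING_MODE_* values:

#include <stdbool.h>
#include <stdio.h>

enum dither_mode { DITHERING_OFF, DITHERING_ON, DITHERING_AUTO };

/*
 * Same shape as the check in nv04_dfp_mode_set(): dither when forced on,
 * or in AUTO mode when the framebuffer depth exceeds the panel's bpc * 3.
 */
static bool want_dithering(enum dither_mode mode, int fb_depth, int panel_bpc)
{
	return mode == DITHERING_ON ||
	       (mode == DITHERING_AUTO && fb_depth > panel_bpc * 3);
}

int main(void)
{
	/* 24bpp scanout on a 6-bit-per-component panel -> dither */
	printf("%d\n", want_dithering(DITHERING_AUTO, 24, 6));
	/* 18-bit content on the same panel -> no dithering needed */
	printf("%d\n", want_dithering(DITHERING_AUTO, 18, 6));
	return 0;
}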
@@ -485,7 +485,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
{
#ifdef __powerpc__
struct drm_device *dev = encoder->dev;
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
/* BIOS scripts usually take care of the backlight, thanks
* Apple for your consistency.
@@ -623,7 +623,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_i2c_port *port = i2c->find(i2c, 2);
struct nouveau_i2c_board_info info[] = {
{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 4342fdaee707..3d0afa1c6cff 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -22,9 +22,6 @@
* Author: Ben Skeggs
*/
-#include <core/object.h>
-#include <core/class.h>
-
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -34,8 +31,6 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
-#include <subdev/i2c.h>
-
int
nv04_display_early_init(struct drm_device *dev)
{
@@ -58,7 +53,7 @@ int
nv04_display_create(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
@@ -70,6 +65,8 @@ nv04_display_create(struct drm_device *dev)
if (!disp)
return -ENOMEM;
+ nvif_object_map(nvif_object(&drm->device));
+
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv04_display_destroy;
nouveau_display(dev)->init = nv04_display_init;
@@ -144,6 +141,7 @@ void
nv04_display_destroy(struct drm_device *dev)
{
struct nv04_display *disp = nv04_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_encoder *encoder;
struct drm_crtc *crtc;
@@ -170,6 +168,8 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_display(dev)->priv = NULL;
kfree(disp);
+
+ nvif_object_unmap(nvif_object(&drm->device));
}
int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 4245fc3dab70..17b899d9aba3 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -131,7 +131,7 @@ nv_two_heads(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
const int impl = dev->pdev->device & 0x0ff0;
- if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
return true;
@@ -150,7 +150,7 @@ nv_two_reg_pll(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
const int impl = dev->pdev->device & 0x0ff0;
- if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
+ if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE)
return true;
return false;
}
@@ -171,8 +171,8 @@ static inline void
nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
struct dcb_output *outp, int crtc)
{
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_bios *bios = nouveau_bios(device);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_bios *bios = nvkm_bios(&drm->device);
struct nvbios_init init = {
.subdev = nv_subdev(bios),
.bios = bios,
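The nv_two_heads() hunk above keeps the same logic under the new nvif naming: everything from the Celsius (NV10) family onward has two CRTCs, except a handful of single-head parts picked out by PCI device ID & 0x0ff0. A self-contained sketch of that check; the family enum values are stand-ins where only the ordering matters.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for NV_DEVICE_INFO_V0_*; only the ordering matters. */
enum { FAMILY_TNT, FAMILY_CELSIUS, FAMILY_KELVIN, FAMILY_RANKINE, FAMILY_CURIE };

/*
 * Mirrors nv_two_heads(): NV10+ parts have two heads, except the
 * single-head implementations identified by PCI device id & 0x0ff0.
 */
static bool two_heads(int family, uint16_t pci_device)
{
	const int impl = pci_device & 0x0ff0;

	return family >= FAMILY_CELSIUS && impl != 0x0100 &&
	       impl != 0x0150 && impl != 0x01a0 && impl != 0x0200;
}

int main(void)
{
	printf("0x0110: %d\n", two_heads(FAMILY_CELSIUS, 0x0110)); /* two heads */
	printf("0x0150: %d\n", two_heads(FAMILY_CELSIUS, 0x0150)); /* single head */
	return 0;
}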
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index aca76af115b3..3d4c19300768 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -27,9 +27,6 @@
#include "hw.h"
#include <subdev/bios/pll.h>
-#include <subdev/fb.h>
-#include <subdev/clock.h>
-#include <subdev/timer.h>
#define CHIPSET_NFORCE 0x01a0
#define CHIPSET_NFORCE2 0x01f0
@@ -92,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner)
if (owner == 1)
owner *= 3;
- if (nv_device(drm->device)->chipset == 0x11) {
+ if (drm->device.info.chipset == 0x11) {
/* This might seem stupid, but the blob does it and
* omitting it often locks the system up.
*/
@@ -103,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner)
/* CR44 is always changed on CRTC0 */
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
- if (nv_device(drm->device)->chipset == 0x11) { /* set me harder */
+ if (drm->device.info.chipset == 0x11) { /* set me harder */
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
}
@@ -152,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
pllvals->NM1 = pll1 & 0xffff;
if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
pllvals->NM2 = pll2 & 0xffff;
- else if (nv_device(drm->device)->chipset == 0x30 || nv_device(drm->device)->chipset == 0x35) {
+ else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) {
pllvals->M1 &= 0xf; /* only 4 bits */
if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
pllvals->M2 = (pll1 >> 4) & 0x7;
@@ -168,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
struct nouveau_pll_vals *pllvals)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_bios *bios = nouveau_bios(device);
+ struct nvif_device *device = &drm->device;
+ struct nouveau_bios *bios = nvkm_bios(device);
uint32_t reg1, pll1, pll2 = 0;
struct nvbios_pll pll_lim;
int ret;
@@ -178,16 +175,16 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
if (ret || !(reg1 = pll_lim.reg))
return -ENOENT;
- pll1 = nv_rd32(device, reg1);
+ pll1 = nvif_rd32(device, reg1);
if (reg1 <= 0x405c)
- pll2 = nv_rd32(device, reg1 + 4);
+ pll2 = nvif_rd32(device, reg1 + 4);
else if (nv_two_reg_pll(dev)) {
uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
- pll2 = nv_rd32(device, reg2);
+ pll2 = nvif_rd32(device, reg2);
}
- if (nv_device(drm->device)->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
/* check whether vpll has been forced into single stage mode */
@@ -255,9 +252,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_clock *clk = nouveau_clock(device);
- struct nouveau_bios *bios = nouveau_bios(device);
+ struct nvif_device *device = &drm->device;
+ struct nouveau_clock *clk = nvkm_clock(device);
+ struct nouveau_bios *bios = nvkm_bios(device);
struct nvbios_pll pll_lim;
struct nouveau_pll_vals pv;
enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;
@@ -394,21 +391,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
int i;
- if (nv_device(drm->device)->card_type >= NV_10)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
if (nv_two_heads(dev))
state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
- if (nv_device(drm->device)->chipset == 0x11)
+ if (drm->device.info.chipset == 0x11)
regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
if (nv_gf4_disp_arch(dev))
regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
- if (nv_device(drm->device)->chipset >= 0x30)
+ if (drm->device.info.chipset >= 0x30)
regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
@@ -450,7 +447,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
if (nv_gf4_disp_arch(dev))
regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
@@ -466,26 +463,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_clock *clk = nouveau_clock(drm->device);
+ struct nouveau_clock *clk = nvkm_clock(&drm->device);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
int i;
- if (nv_device(drm->device)->card_type >= NV_10)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
clk->pll_prog(clk, pllreg, &regp->pllvals);
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
if (nv_two_heads(dev))
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
- if (nv_device(drm->device)->chipset == 0x11)
+ if (drm->device.info.chipset == 0x11)
NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
if (nv_gf4_disp_arch(dev))
NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
- if (nv_device(drm->device)->chipset >= 0x30)
+ if (drm->device.info.chipset >= 0x30)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
@@ -522,7 +519,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
if (nv_gf4_disp_arch(dev))
NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
@@ -603,10 +600,10 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
- if (nv_device(drm->device)->card_type >= NV_20)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
rd_cio_state(dev, head, regp, 0x9f);
rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
@@ -615,14 +612,14 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
if (nv_two_heads(dev))
@@ -634,7 +631,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -663,14 +660,13 @@ nv_load_state_ext(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_timer *ptimer = nouveau_timer(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
+ struct nvif_device *device = &drm->device;
+ struct nouveau_timer *ptimer = nvkm_timer(device);
struct nv04_crtc_reg *regp = &state->crtc_reg[head];
uint32_t reg900;
int i;
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
if (nv_two_heads(dev))
/* setting ENGINE_CTRL (EC) *must* come before
* CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
@@ -678,24 +674,24 @@ nv_load_state_ext(struct drm_device *dev, int head,
*/
NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
- nv_wr32(device, NV_PVIDEO_STOP, 1);
- nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
- nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
- nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
- nv_wr32(device, NV_PVIDEO_LIMIT(0), pfb->ram->size - 1);
- nv_wr32(device, NV_PVIDEO_LIMIT(1), pfb->ram->size - 1);
- nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), pfb->ram->size - 1);
- nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), pfb->ram->size - 1);
- nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
+ nvif_wr32(device, NV_PVIDEO_STOP, 1);
+ nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
+ nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
+ nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
+ nvif_wr32(device, NV_PVIDEO_LIMIT(0), device->info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_LIMIT(1), device->info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), device->info.ram_size - 1);
+ nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), device->info.ram_size - 1);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
- if (nv_device(drm->device)->card_type == NV_40) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
@@ -718,23 +714,23 @@ nv_load_state_ext(struct drm_device *dev, int head,
wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (nv_device(drm->device)->card_type >= NV_20)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
- if (nv_device(drm->device)->card_type >= NV_30)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
wr_cio_state(dev, head, regp, 0x9f);
wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, head);
wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -742,7 +738,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
}
/* NV11 and NV20 stop at 0x52. */
if (nv_gf4_disp_arch(dev)) {
- if (nv_device(drm->device)->card_type < NV_20) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
/* Not waiting for vertical retrace before modifying
CRE_53/CRE_54 causes lockups. */
nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
@@ -769,15 +765,15 @@ static void
nv_save_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
int head_offset = head * NV_PRMDIO_SIZE, i;
- nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
+ nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
NV_PRMDIO_PIXEL_MASK_MASK);
- nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
+ nvif_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
for (i = 0; i < 768; i++) {
- state->crtc_reg[head].DAC[i] = nv_rd08(device,
+ state->crtc_reg[head].DAC[i] = nvif_rd08(device,
NV_PRMDIO_PALETTE_DATA + head_offset);
}
@@ -788,15 +784,15 @@ void
nouveau_hw_load_state_palette(struct drm_device *dev, int head,
struct nv04_mode_state *state)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
int head_offset = head * NV_PRMDIO_SIZE, i;
- nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
+ nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
NV_PRMDIO_PIXEL_MASK_MASK);
- nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
+ nvif_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
for (i = 0; i < 768; i++) {
- nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
+ nvif_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
state->crtc_reg[head].DAC[i]);
}
@@ -808,7 +804,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head,
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nv_device(drm->device)->chipset == 0x11)
+ if (drm->device.info.chipset == 0x11)
/* NB: no attempt is made to restore the bad pll later on */
nouveau_hw_fix_bad_vpll(dev, head);
nv_save_state_ramdac(dev, head, state);
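The nouveau_hw_get_pllvals() hunk above reads one or two PLL words depending on where the first register lives: at or below 0x405c the second word sits at reg1 + 4, otherwise two-register PLL hardware keeps it 0x70 past the first word (0x5c for VPLL2). A small sketch of that selection; the example register value in main() is illustrative, not a real MMIO address.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Where the second PLL word is found, following the logic shown above:
 * low register blocks keep it at reg1 + 4, two-register PLLs keep it
 * at reg1 + 0x70 (or reg1 + 0x5c when reg1 is the VPLL2 register).
 */
static uint32_t second_pll_reg(uint32_t reg1, bool two_reg_pll, bool is_vpll2)
{
	if (reg1 <= 0x405c)
		return reg1 + 4;
	if (two_reg_pll)
		return reg1 + (is_vpll2 ? 0x5c : 0x70);
	return 0; /* no second word to read */
}

int main(void)
{
	printf("0x%x\n", second_pll_reg(0x4020, false, false));  /* -> 0x4024   */
	printf("0x%x\n", second_pll_reg(0x680500, true, false)); /* -> 0x680570 */
	return 0;
}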
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h
index eeb70d912d99..7f53c571f31f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h
@@ -60,41 +60,41 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
static inline uint32_t NVReadCRTC(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
uint32_t val;
if (head)
reg += NV_PCRTC0_SIZE;
- val = nv_rd32(device, reg);
+ val = nvif_rd32(device, reg);
return val;
}
static inline void NVWriteCRTC(struct drm_device *dev,
int head, uint32_t reg, uint32_t val)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
if (head)
reg += NV_PCRTC0_SIZE;
- nv_wr32(device, reg, val);
+ nvif_wr32(device, reg, val);
}
static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
uint32_t val;
if (head)
reg += NV_PRAMDAC0_SIZE;
- val = nv_rd32(device, reg);
+ val = nvif_rd32(device, reg);
return val;
}
static inline void NVWriteRAMDAC(struct drm_device *dev,
int head, uint32_t reg, uint32_t val)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
if (head)
reg += NV_PRAMDAC0_SIZE;
- nv_wr32(device, reg, val);
+ nvif_wr32(device, reg, val);
}
static inline uint8_t nv_read_tmds(struct drm_device *dev,
@@ -120,18 +120,18 @@ static inline void nv_write_tmds(struct drm_device *dev,
static inline void NVWriteVgaCrtc(struct drm_device *dev,
int head, uint8_t index, uint8_t value)
{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
- nv_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+ nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
}
static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
int head, uint8_t index)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
uint8_t val;
- nv_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
- val = nv_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
+ nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
+ val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
return val;
}
@@ -165,74 +165,74 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
int head, uint32_t reg)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
uint8_t val;
/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
* NVSetOwner for the relevant head to be programmed */
- if (head && nv_device(drm->device)->card_type == NV_40)
+ if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
reg += NV_PRMVIO_SIZE;
- val = nv_rd08(device, reg);
+ val = nvif_rd08(device, reg);
return val;
}
static inline void NVWritePRMVIO(struct drm_device *dev,
int head, uint32_t reg, uint8_t value)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
/* Only NV4x have two pvio ranges; other twoHeads cards MUST call
* NVSetOwner for the relevant head to be programmed */
- if (head && nv_device(drm->device)->card_type == NV_40)
+ if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
reg += NV_PRMVIO_SIZE;
- nv_wr08(device, reg, value);
+ nvif_wr08(device, reg, value);
}
static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
- nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
}
static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
- return !(nv_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
}
static inline void NVWriteVgaAttr(struct drm_device *dev,
int head, uint8_t index, uint8_t value)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
if (NVGetEnablePalette(dev, head))
index &= ~0x20;
else
index |= 0x20;
- nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
- nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
- nv_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
+ nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+ nvif_wr08(device, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
}
static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
int head, uint8_t index)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
uint8_t val;
if (NVGetEnablePalette(dev, head))
index &= ~0x20;
else
index |= 0x20;
- nv_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
- nv_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
- val = nv_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
+ nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
+ nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
+ val = nvif_rd08(device, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
return val;
}
@@ -259,11 +259,11 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
static inline bool
nv_heads_tied(struct drm_device *dev)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
- if (nv_device(drm->device)->chipset == 0x11)
- return !!(nv_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
+ if (drm->device.info.chipset == 0x11)
+ return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
}
@@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock)
NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
/* NV11 has independently lockable extended crtcs, except when tied */
- if (nv_device(drm->device)->chipset == 0x11 && !nv_heads_tied(dev))
+ if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev))
NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
lock ? NV_CIO_SR_LOCK_VALUE :
NV_CIO_SR_UNLOCK_RW_VALUE);
@@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- return nv_device(drm->device)->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+ return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
}
static inline void
@@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
- if (nv_device(drm->device)->card_type == NV_04) {
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) {
/*
* Hilarious, the 24th bit doesn't want to stick to
* PCRTC_START...
@@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show)
*curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
nv_fix_nv40_hw_cursor(dev, head);
}
@@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
bpp = 8;
/* Alignment requirements taken from the Haiku driver */
- if (nv_device(drm->device)->card_type == NV_04)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
mask = 128 / bpp - 1;
else
mask = 512 / bpp - 1;
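The hw.h helpers above (NVWriteVgaCrtc, NVReadVgaCrtc, NVReadVgaAttr, ...) all follow the classic VGA index/data pattern: one 8-bit write latches the register index, the next access on the data port hits that register, and head 1 lives a fixed offset away. A toy model of that pattern with a latched index and a per-head register file instead of MMIO; the names are illustrative.

#include <stdint.h>
#include <stdio.h>

/*
 * Model of the index/data access used by NVWriteVgaCrtc()/NVReadVgaCrtc():
 * a write to the CRX (index) port selects a register, the CR (data) port
 * then carries its value.  Per-head state is a struct here, not MMIO.
 */
struct crtc_model {
	uint8_t index;     /* last value written to the CRX (index) port */
	uint8_t regs[256]; /* CRTC register file reached via the CR port */
};

static struct crtc_model heads[2];

static void crtc_write(int head, uint8_t index, uint8_t value)
{
	heads[head].index = index;                   /* wr08(CRX + head * size) */
	heads[head].regs[heads[head].index] = value; /* wr08(CR  + head * size) */
}

static uint8_t crtc_read(int head, uint8_t index)
{
	heads[head].index = index;                   /* wr08(CRX + head * size) */
	return heads[head].regs[heads[head].index];  /* rd08(CR  + head * size) */
}

int main(void)
{
	crtc_write(1, 0x44, 0x03);
	printf("head1 CR44 = 0x%02x\n", crtc_read(1, 0x44));
	return 0;
}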
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index ab03f7719d2d..b36afcbbc83f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -96,7 +96,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -117,7 +117,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (format > 0xffff)
return -ERANGE;
- if (dev->chipset >= 0x30) {
+ if (dev->info.chipset >= 0x30) {
if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
return -ERANGE;
} else {
@@ -131,17 +131,17 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_plane->cur = nv_fb->nvbo;
- nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
- nv_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
+ nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
+ nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
- nv_wr32(dev, NV_PVIDEO_BASE(flip), 0);
- nv_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
- nv_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
- nv_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
- nv_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
- nv_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
- nv_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
- nv_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
+ nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0);
+ nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+ nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
+ nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
+ nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
+ nvif_wr32(dev, NV_PVIDEO_DT_DY(flip), (src_h << 20) / crtc_h);
+ nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x);
+ nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w);
if (fb->pixel_format != DRM_FORMAT_UYVY)
format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8;
@@ -153,14 +153,14 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
if (fb->pixel_format == DRM_FORMAT_NV12) {
- nv_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
- nv_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
+ nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
+ nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
nv_fb->nvbo->bo.offset + fb->offsets[1]);
}
- nv_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
- nv_wr32(dev, NV_PVIDEO_STOP, 0);
+ nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format);
+ nvif_wr32(dev, NV_PVIDEO_STOP, 0);
/* TODO: wait for vblank? */
- nv_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
+ nvif_wr32(dev, NV_PVIDEO_BUFFER, flip ? 0x10 : 0x1);
nv_plane->flip = !flip;
if (cur)
@@ -172,10 +172,10 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
static int
nv10_disable_plane(struct drm_plane *plane)
{
- struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
- nv_wr32(dev, NV_PVIDEO_STOP, 1);
+ nvif_wr32(dev, NV_PVIDEO_STOP, 1);
if (nv_plane->cur) {
nouveau_bo_unpin(nv_plane->cur);
nv_plane->cur = NULL;
@@ -195,24 +195,24 @@ nv_destroy_plane(struct drm_plane *plane)
static void
nv10_set_params(struct nouveau_plane *plane)
{
- struct nouveau_device *dev = nouveau_dev(plane->base.dev);
+ struct nvif_device *dev = &nouveau_drm(plane->base.dev)->device;
u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
(cos_mul(plane->hue, plane->saturation) & 0xffff);
u32 format = 0;
- nv_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
- nv_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
- nv_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
- nv_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
- nv_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
+ nvif_wr32(dev, NV_PVIDEO_LUMINANCE(0), luma);
+ nvif_wr32(dev, NV_PVIDEO_LUMINANCE(1), luma);
+ nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(0), chroma);
+ nvif_wr32(dev, NV_PVIDEO_CHROMINANCE(1), chroma);
+ nvif_wr32(dev, NV_PVIDEO_COLOR_KEY, plane->colorkey & 0xffffff);
if (plane->cur) {
if (plane->iturbt_709)
format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709;
if (plane->colorkey & (1 << 24))
format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY;
- nv_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
+ nvif_mask(dev, NV_PVIDEO_FORMAT(plane->flip),
NV_PVIDEO_FORMAT_MATRIX_ITURBT709 |
NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY,
format);
@@ -256,7 +256,7 @@ static const struct drm_plane_funcs nv10_plane_funcs = {
static void
nv10_overlay_init(struct drm_device *device)
{
- struct nouveau_device *dev = nouveau_dev(device);
+ struct nouveau_drm *drm = nouveau_drm(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
int num_formats = ARRAY_SIZE(formats);
int ret;
@@ -264,7 +264,7 @@ nv10_overlay_init(struct drm_device *device)
if (!plane)
return;
- switch (dev->chipset) {
+ switch (drm->device.info.chipset) {
case 0x10:
case 0x11:
case 0x15:
@@ -333,7 +333,7 @@ cleanup:
drm_plane_cleanup(&plane->base);
err:
kfree(plane);
- nv_error(dev, "Failed to create plane\n");
+ NV_ERROR(drm, "Failed to create plane\n");
}
static int
@@ -343,7 +343,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
- struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_bo *cur = nv_plane->cur;
@@ -375,43 +375,43 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
nv_plane->cur = nv_fb->nvbo;
- nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
- nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
- nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0);
for (i = 0; i < 2; i++) {
- nv_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
+ nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
nv_fb->nvbo->bo.offset);
- nv_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
- nv_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
+ nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i, pitch);
+ nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
}
- nv_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
- nv_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
- nv_wr32(dev, NV_PVIDEO_STEP_SIZE,
+ nvif_wr32(dev, NV_PVIDEO_WINDOW_START, crtc_y << 16 | crtc_x);
+ nvif_wr32(dev, NV_PVIDEO_WINDOW_SIZE, crtc_h << 16 | crtc_w);
+ nvif_wr32(dev, NV_PVIDEO_STEP_SIZE,
(uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 | (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1)));
/* It should be possible to convert hue/contrast to this */
- nv_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
- nv_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
- nv_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
- nv_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
+ nvif_wr32(dev, NV_PVIDEO_RED_CSC_OFFSET, 0x69 - brightness);
+ nvif_wr32(dev, NV_PVIDEO_GREEN_CSC_OFFSET, 0x3e + brightness);
+ nvif_wr32(dev, NV_PVIDEO_BLUE_CSC_OFFSET, 0x89 - brightness);
+ nvif_wr32(dev, NV_PVIDEO_CSC_ADJUST, 0);
- nv_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
- nv_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
+ nvif_wr32(dev, NV_PVIDEO_CONTROL_Y, 0x001); /* (BLUR_ON, LINE_HALF) */
+ nvif_wr32(dev, NV_PVIDEO_CONTROL_X, 0x111); /* (WEIGHT_HEAVY, SHARPENING_ON, SMOOTHING_ON) */
- nv_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
- nv_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
+ nvif_wr32(dev, NV_PVIDEO_FIFO_BURST_LENGTH, 0x03);
+ nvif_wr32(dev, NV_PVIDEO_FIFO_THRES_SIZE, 0x38);
- nv_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
+ nvif_wr32(dev, NV_PVIDEO_KEY, nv_plane->colorkey);
if (nv_plane->colorkey & (1 << 24))
overlay |= 0x10;
if (fb->pixel_format == DRM_FORMAT_YUYV)
overlay |= 0x100;
- nv_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
+ nvif_wr32(dev, NV_PVIDEO_OVERLAY, overlay);
- nv_wr32(dev, NV_PVIDEO_SU_STATE, nv_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
+ nvif_wr32(dev, NV_PVIDEO_SU_STATE, nvif_rd32(dev, NV_PVIDEO_SU_STATE) ^ (1 << 16));
if (cur)
nouveau_bo_unpin(cur);
@@ -422,13 +422,13 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
static int
nv04_disable_plane(struct drm_plane *plane)
{
- struct nouveau_device *dev = nouveau_dev(plane->dev);
+ struct nvif_device *dev = &nouveau_drm(plane->dev)->device;
struct nouveau_plane *nv_plane = (struct nouveau_plane *)plane;
- nv_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
- nv_wr32(dev, NV_PVIDEO_OE_STATE, 0);
- nv_wr32(dev, NV_PVIDEO_SU_STATE, 0);
- nv_wr32(dev, NV_PVIDEO_RM_STATE, 0);
+ nvif_mask(dev, NV_PVIDEO_OVERLAY, 1, 0);
+ nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
+ nvif_wr32(dev, NV_PVIDEO_RM_STATE, 0);
if (nv_plane->cur) {
nouveau_bo_unpin(nv_plane->cur);
nv_plane->cur = NULL;
@@ -447,7 +447,7 @@ static const struct drm_plane_funcs nv04_plane_funcs = {
static void
nv04_overlay_init(struct drm_device *device)
{
- struct nouveau_device *dev = nouveau_dev(device);
+ struct nouveau_drm *drm = nouveau_drm(device);
struct nouveau_plane *plane = kzalloc(sizeof(struct nouveau_plane), GFP_KERNEL);
int ret;
@@ -483,15 +483,15 @@ cleanup:
drm_plane_cleanup(&plane->base);
err:
kfree(plane);
- nv_error(dev, "Failed to create plane\n");
+ NV_ERROR(drm, "Failed to create plane\n");
}
void
nouveau_overlay_init(struct drm_device *device)
{
- struct nouveau_device *dev = nouveau_dev(device);
- if (dev->chipset < 0x10)
+ struct nvif_device *dev = &nouveau_drm(device)->device;
+ if (dev->info.chipset < 0x10)
nv04_overlay_init(device);
- else if (dev->chipset <= 0x40)
+ else if (dev->info.chipset <= 0x40)
nv10_overlay_init(device);
}
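The overlay scaling registers programmed above are plain fixed-point ratios of source to destination size: NV10+ uses (src << 20) / crtc for DS_DX/DT_DY (a ratio with 20 fractional bits), while NV04 packs a pair of roughly 11-fractional-bit steps into STEP_SIZE. A standalone sketch of both computations, using an arbitrary 2x upscale as the worked example:

#include <stdint.h>
#include <stdio.h>

/* NV10+ per-axis step, as written to NV_PVIDEO_DS_DX / NV_PVIDEO_DT_DY. */
static uint32_t nv10_step(uint32_t src, uint32_t crtc)
{
	return (src << 20) / crtc;
}

/* NV04 packed step pair, as written to NV_PVIDEO_STEP_SIZE. */
static uint32_t nv04_step_size(uint32_t src_w, uint32_t src_h,
			       uint32_t crtc_w, uint32_t crtc_h)
{
	return (uint32_t)(((src_h - 1) << 11) / (crtc_h - 1)) << 16 |
	       (uint32_t)(((src_w - 1) << 11) / (crtc_w - 1));
}

int main(void)
{
	/* 720x480 video scaled to a 1440x960 window: ratio 0.5 per axis */
	printf("NV10 DS_DX     = 0x%08x\n", nv10_step(720, 1440)); /* 0x00080000 */
	printf("NV04 STEP_SIZE = 0x%08x\n",
	       nv04_step_size(720, 480, 1440, 960));
	return 0;
}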
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 8667620b703a..8061d8d0ce79 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -35,8 +35,6 @@
#include <drm/i2c/ch7006.h>
-#include <subdev/i2c.h>
-
static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
{
{
@@ -56,7 +54,7 @@ static struct nouveau_i2c_board_info nv04_tv_encoder_info[] = {
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
return i2c->identify(i2c, i2c_index, "TV encoder",
nv04_tv_encoder_info, NULL, NULL);
@@ -206,7 +204,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
struct drm_encoder *encoder;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
int type, ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index 195bd8e86c6a..72d2ab04db47 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -34,11 +34,6 @@
#include "hw.h"
#include "tvnv17.h"
-#include <core/device.h>
-
-#include <subdev/bios/gpio.h>
-#include <subdev/gpio.h>
-
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -51,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+ struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -135,17 +130,17 @@ static bool
get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_object *device = drm->device;
+ struct nvif_device *device = &drm->device;
/* Zotac FX5200 */
- if (nv_device_match(device, 0x0322, 0x19da, 0x1035) ||
- nv_device_match(device, 0x0322, 0x19da, 0x2035)) {
+ if (nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x1035) ||
+ nv_device_match(nvkm_object(device), 0x0322, 0x19da, 0x2035)) {
*pin_mask = 0xc;
return false;
}
/* MSI nForce2 IGP */
- if (nv_device_match(device, 0x01f0, 0x1462, 0x5710)) {
+ if (nv_device_match(nvkm_object(device), 0x01f0, 0x1462, 0x5710)) {
*pin_mask = 0xc;
return false;
}
@@ -167,8 +162,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
return connector_status_disconnected;
if (reliable) {
- if (nv_device(drm->device)->chipset == 0x42 ||
- nv_device(drm->device)->chipset == 0x43)
+ if (drm->device.info.chipset == 0x42 ||
+ drm->device.info.chipset == 0x43)
tv_enc->pin_mask =
nv42_tv_sample_load(encoder) >> 28 & 0xe;
else
@@ -375,7 +370,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+ struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -448,7 +443,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
/* Set the DACCLK register */
dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
- if (nv_device(drm->device)->card_type == NV_40)
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
dacclk |= 0x1a << 16;
if (tv_norm->kind == CTV_ENC_MODE) {
@@ -505,7 +500,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
tv_regs->ptv_614 = 0x13;
}
- if (nv_device(drm->device)->card_type >= NV_30) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
tv_regs->ptv_500 = 0xe8e0;
tv_regs->ptv_504 = 0x1710;
tv_regs->ptv_604 = 0x0;
@@ -600,7 +595,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
/* This could use refinement for flatpanels, but it should work */
- if (nv_device(drm->device)->chipset < 0x44)
+ if (drm->device.info.chipset < 0x44)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
nv04_dac_output_offset(encoder),
0xf0000000);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
index 7b331543a41b..225894cdcac2 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h
@@ -130,14 +130,14 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
- struct nouveau_device *device = nouveau_dev(dev);
- nv_wr32(device, reg, val);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ nvif_wr32(device, reg, val);
}
static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
{
- struct nouveau_device *device = nouveau_dev(dev);
- return nv_rd32(device, reg);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
+ return nvif_rd32(device, reg);
}
static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg,
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index b13f441c6431..615714c1727d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -21,16 +21,10 @@
*
*/
-#include <core/object.h>
-#include <core/client.h>
-#include <core/device.h>
-#include <core/class.h>
-#include <core/mm.h>
-
-#include <subdev/fb.h>
-#include <subdev/timer.h>
-#include <subdev/instmem.h>
-#include <engine/graph.h>
+#include <nvif/client.h>
+#include <nvif/driver.h>
+#include <nvif/ioctl.h>
+#include <nvif/class.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -47,20 +41,20 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
struct nouveau_abi16 *abi16;
cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
if (cli->abi16) {
+ struct nv_device_v0 args = {
+ .device = ~0ULL,
+ };
+
INIT_LIST_HEAD(&abi16->channels);
- abi16->client = nv_object(cli);
/* allocate device object targeting client's default
* device (ie. the one that belongs to the fd it
* opened)
*/
- if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
- NVDRM_DEVICE, 0x0080,
- &(struct nv_device_class) {
- .device = ~0ULL,
- },
- sizeof(struct nv_device_class),
- &abi16->device) == 0)
+ if (nvif_device_init(&cli->base.base, NULL,
+ NOUVEAU_ABI16_DEVICE, NV_DEVICE,
+ &args, sizeof(args),
+ &abi16->device) == 0)
return cli->abi16;
kfree(cli->abi16);
@@ -75,7 +69,7 @@ nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
- struct nouveau_cli *cli = (void *)abi16->client;
+ struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
mutex_unlock(&cli->mutex);
return ret;
}
@@ -83,21 +77,19 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
u16
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
- switch (nv_device(drm->device)->card_type) {
- case NV_04:
+ switch (drm->device.info.family) {
+ case NV_DEVICE_INFO_V0_TNT:
return 0x006e;
- case NV_10:
- case NV_11:
- case NV_20:
- case NV_30:
- case NV_40:
+ case NV_DEVICE_INFO_V0_CELSIUS:
+ case NV_DEVICE_INFO_V0_KELVIN:
+ case NV_DEVICE_INFO_V0_RANKINE:
+ case NV_DEVICE_INFO_V0_CURIE:
return 0x016e;
- case NV_50:
+ case NV_DEVICE_INFO_V0_TESLA:
return 0x506e;
- case NV_C0:
- case NV_D0:
- case NV_E0:
- case GM100:
+ case NV_DEVICE_INFO_V0_FERMI:
+ case NV_DEVICE_INFO_V0_KEPLER:
+ case NV_DEVICE_INFO_V0_MAXWELL:
return 0x906e;
}
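The converted nouveau_abi16_swclass() above keeps the same family-to-software-class mapping, only renaming the card_type constants to NV_DEVICE_INFO_V0_* families. The mapping itself, as a standalone function with stand-in enum values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the NV_DEVICE_INFO_V0_* family values. */
enum family {
	TNT, CELSIUS, KELVIN, RANKINE, CURIE, TESLA, FERMI, KEPLER, MAXWELL
};

/* The same software-class selection nouveau_abi16_swclass() encodes above. */
static uint16_t swclass(enum family f)
{
	switch (f) {
	case TNT:
		return 0x006e;
	case CELSIUS:
	case KELVIN:
	case RANKINE:
	case CURIE:
		return 0x016e;
	case TESLA:
		return 0x506e;
	case FERMI:
	case KEPLER:
	case MAXWELL:
		return 0x906e;
	}
	return 0x0000; /* unknown family */
}

int main(void)
{
	printf("Tesla sw class: 0x%04x\n", swclass(TESLA));
	return 0;
}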
@@ -140,7 +132,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
/* destroy channel object, all children will be killed too */
if (chan->chan) {
- abi16->handles &= ~(1ULL << (chan->chan->handle & 0xffff));
+ abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
nouveau_channel_del(&chan->chan);
}
@@ -151,7 +143,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
- struct nouveau_cli *cli = (void *)abi16->client;
+ struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
struct nouveau_abi16_chan *chan, *temp;
/* cleanup channels */
@@ -160,7 +152,7 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
}
/* destroy the device object */
- nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);
+ nvif_device_fini(&abi16->device);
kfree(cli->abi16);
cli->abi16 = NULL;
@@ -169,30 +161,31 @@ nouveau_abi16_fini(struct nouveau_abi16 *abi16)
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_timer *ptimer = nouveau_timer(device);
- struct nouveau_graph *graph = (void *)nouveau_engine(device, NVDEV_ENGINE_GR);
+ struct nvif_device *device = &drm->device;
+ struct nouveau_timer *ptimer = nvkm_timer(device);
+ struct nouveau_graph *graph = nvkm_gr(device);
struct drm_nouveau_getparam *getparam = data;
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
- getparam->value = device->chipset;
+ getparam->value = device->info.chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- if (nv_device_is_pci(device))
+ if (nv_device_is_pci(nvkm_device(device)))
getparam->value = dev->pdev->vendor;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- if (nv_device_is_pci(device))
+ if (nv_device_is_pci(nvkm_device(device)))
getparam->value = dev->pdev->device;
else
getparam->value = 0;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (!nv_device_is_pci(device))
+ if (!nv_device_is_pci(nvkm_device(device)))
getparam->value = 3;
else
if (drm_pci_device_is_agp(dev))
@@ -225,7 +218,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = graph->units ? graph->units(graph) : 0;
break;
default:
- nv_debug(device, "unknown parameter %lld\n", getparam->param);
+ NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
}
@@ -246,10 +239,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan;
- struct nouveau_client *client;
- struct nouveau_device *device;
- struct nouveau_instmem *imem;
- struct nouveau_fb *pfb;
+ struct nvif_device *device;
int ret;
if (unlikely(!abi16))
@@ -258,21 +248,18 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (!drm->channel)
return nouveau_abi16_put(abi16, -ENODEV);
- client = nv_client(abi16->client);
- device = nv_device(abi16->device);
- imem = nouveau_instmem(device);
- pfb = nouveau_fb(device);
+ device = &abi16->device;
/* hack to allow channel engine type specification on kepler */
- if (device->card_type >= NV_E0) {
+ if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
if (init->fb_ctxdma_handle != ~0)
- init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
+ init->fb_ctxdma_handle = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
else
init->fb_ctxdma_handle = init->tt_ctxdma_handle;
/* allow flips to be executed if this is a graphics channel */
init->tt_ctxdma_handle = 0;
- if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
+ if (init->fb_ctxdma_handle == KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR)
init->tt_ctxdma_handle = 1;
}
@@ -293,13 +280,14 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
abi16->handles |= (1ULL << init->channel);
/* create channel object and initialise dma and fence management */
- ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
- init->channel, init->fb_ctxdma_handle,
+ ret = nouveau_channel_new(drm, device,
+ NOUVEAU_ABI16_CHAN(init->channel),
+ init->fb_ctxdma_handle,
init->tt_ctxdma_handle, &chan->chan);
if (ret)
goto done;
- if (device->card_type >= NV_50)
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
else
@@ -308,10 +296,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
else
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- if (device->card_type < NV_10) {
+ if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
init->subchan[0].handle = 0x00000000;
init->subchan[0].grclass = 0x0000;
- init->subchan[1].handle = NvSw;
+ init->subchan[1].handle = chan->chan->nvsw.handle;
init->subchan[1].grclass = 0x506e;
init->nr_subchan = 2;
}
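The channel bookkeeping visible above tracks up to 64 channels per client in a u64 bitmask: channel_alloc sets (1ULL << channel) in abi16->handles, and nouveau_abi16_chan_fini clears it again. A toy allocator over the same idea; the scan-for-a-free-slot loop is an assumption for illustration, since only the set and clear operations appear in the diff.

#include <stdint.h>
#include <stdio.h>

static uint64_t handles; /* one bit per channel slot, like abi16->handles */

static int chan_alloc(void)
{
	int i;

	for (i = 0; i < 64; i++) {
		if (!(handles & (1ULL << i))) {
			handles |= 1ULL << i;
			return i;
		}
	}
	return -1; /* all 64 slots in use */
}

static void chan_free(int i)
{
	handles &= ~(1ULL << i);
}

int main(void)
{
	int a = chan_alloc(), b = chan_alloc();

	printf("allocated %d and %d\n", a, b);
	chan_free(a);
	printf("next alloc reuses %d\n", chan_alloc());
	return 0;
}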
@@ -324,8 +312,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- if (device->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
&chan->ntfy_vma);
if (ret)
goto done;
@@ -343,6 +331,18 @@ done:
return nouveau_abi16_put(abi16, ret);
}
+static struct nouveau_abi16_chan *
+nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
+{
+ struct nouveau_abi16_chan *chan;
+
+ list_for_each_entry(chan, &abi16->channels, head) {
+ if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
+ return chan;
+ }
+
+ return NULL;
+}
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
@@ -350,28 +350,38 @@ nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
struct drm_nouveau_channel_free *req = data;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_abi16_chan *chan;
- int ret = -ENOENT;
if (unlikely(!abi16))
return -ENOMEM;
- list_for_each_entry(chan, &abi16->channels, head) {
- if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
- nouveau_abi16_chan_fini(abi16, chan);
- return nouveau_abi16_put(abi16, 0);
- }
- }
-
- return nouveau_abi16_put(abi16, ret);
+ chan = nouveau_abi16_chan(abi16, req->channel);
+ if (!chan)
+ return nouveau_abi16_put(abi16, -ENOENT);
+ nouveau_abi16_chan_fini(abi16, chan);
+ return nouveau_abi16_put(abi16, 0);
}
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_grobj_alloc *init = data;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_new_v0 new;
+ } args = {
+ .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
+ .ioctl.type = NVIF_IOCTL_V0_NEW,
+ .ioctl.path_nr = 3,
+ .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
+ .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
+ .ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
+ .new.route = NVDRM_OBJECT_ABI16,
+ .new.handle = init->handle,
+ .new.oclass = init->class,
+ };
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_object *object;
+ struct nvif_client *client;
int ret;
if (unlikely(!abi16))
@@ -379,6 +389,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
if (init->handle == ~0)
return nouveau_abi16_put(abi16, -EINVAL);
+ client = nvif_client(nvif_object(&abi16->device));
/* compatibility with userspace that assumes 506e for all chipsets */
if (init->class == 0x506e) {
@@ -387,8 +398,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
return nouveau_abi16_put(abi16, 0);
}
- ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
- init->handle, init->class, NULL, 0, &object);
+ ret = nvif_client_ioctl(client, &args, sizeof(args));
return nouveau_abi16_put(abi16, ret);
}
@@ -396,29 +406,38 @@ int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_notifierobj_alloc *info = data;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_new_v0 new;
+ struct nv_dma_v0 ctxdma;
+ } args = {
+ .ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
+ .ioctl.type = NVIF_IOCTL_V0_NEW,
+ .ioctl.path_nr = 3,
+ .ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
+ .ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
+ .ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
+ .new.route = NVDRM_OBJECT_ABI16,
+ .new.handle = info->handle,
+ .new.oclass = NV_DMA_IN_MEMORY,
+ };
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
- struct nouveau_abi16_chan *chan = NULL, *temp;
+ struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
- struct nouveau_object *object;
- struct nv_dma_class args = {};
+ struct nvif_device *device = &abi16->device;
+ struct nvif_client *client;
int ret;
if (unlikely(!abi16))
return -ENOMEM;
/* completely unnecessary for these chipsets... */
- if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
+ if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
return nouveau_abi16_put(abi16, -EINVAL);
+ client = nvif_client(nvif_object(&abi16->device));
- list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
- chan = temp;
- break;
- }
- }
-
+ chan = nouveau_abi16_chan(abi16, info->channel);
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
@@ -434,26 +453,29 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
- args.start = ntfy->node->offset;
- args.limit = ntfy->node->offset + ntfy->node->length - 1;
- if (device->card_type >= NV_50) {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
- args.start += chan->ntfy_vma.offset;
- args.limit += chan->ntfy_vma.offset;
+ args.ctxdma.start = ntfy->node->offset;
+ args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ args.ctxdma.target = NV_DMA_V0_TARGET_VM;
+ args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
+ args.ctxdma.start += chan->ntfy_vma.offset;
+ args.ctxdma.limit += chan->ntfy_vma.offset;
} else
if (drm->agp.stat == ENABLED) {
- args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
- args.start += drm->agp.base + chan->ntfy->bo.offset;
- args.limit += drm->agp.base + chan->ntfy->bo.offset;
+ args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
+ args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
+ args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
+ args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
+ client->super = true;
} else {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
- args.start += chan->ntfy->bo.offset;
- args.limit += chan->ntfy->bo.offset;
+ args.ctxdma.target = NV_DMA_V0_TARGET_VM;
+ args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
+ args.ctxdma.start += chan->ntfy->bo.offset;
+ args.ctxdma.limit += chan->ntfy->bo.offset;
}
- ret = nouveau_object_new(abi16->client, chan->chan->handle,
- ntfy->handle, 0x003d, &args,
- sizeof(args), &object);
+ ret = nvif_client_ioctl(client, &args, sizeof(args));
+ client->super = false;
if (ret)
goto done;
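The ctxdma setup above programs an inclusive [start, limit] window covering the notifier node's byte range, rebased onto whichever address space applies (VM offset on Tesla+, AGP aperture plus BO offset, or plain BO offset). A small sketch of that arithmetic; the struct and function names are invented for the example.

#include <stdint.h>
#include <stdio.h>

struct window { uint64_t start, limit; };

/*
 * start/limit as computed above: the node's offset and length within its
 * backing object, shifted by whatever base the target address space uses.
 */
static struct window ctxdma_window(uint64_t node_offset, uint64_t node_length,
				   uint64_t base)
{
	struct window w;

	w.start = base + node_offset;
	w.limit = base + node_offset + node_length - 1; /* inclusive limit */
	return w;
}

int main(void)
{
	/* a 4 KiB notifier at offset 0x1000 inside a BO mapped at 0x200000 */
	struct window w = ctxdma_window(0x1000, 0x1000, 0x200000);

	printf("start 0x%llx limit 0x%llx\n",
	       (unsigned long long)w.start, (unsigned long long)w.limit);
	return 0;
}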
@@ -469,28 +491,36 @@ int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_gpuobj_free *fini = data;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_del del;
+ } args = {
+ .ioctl.owner = NVDRM_OBJECT_ABI16,
+ .ioctl.type = NVIF_IOCTL_V0_DEL,
+ .ioctl.path_nr = 4,
+ .ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
+ .ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
+ .ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
+ .ioctl.path[0] = fini->handle,
+ };
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
- struct nouveau_abi16_chan *chan = NULL, *temp;
+ struct nouveau_abi16_chan *chan;
struct nouveau_abi16_ntfy *ntfy;
+ struct nvif_client *client;
int ret;
if (unlikely(!abi16))
return -ENOMEM;
- list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
- chan = temp;
- break;
- }
- }
-
+ chan = nouveau_abi16_chan(abi16, fini->channel);
if (!chan)
return nouveau_abi16_put(abi16, -ENOENT);
+ client = nvif_client(nvif_object(&abi16->device));
/* synchronize with the user channel and destroy the gpu object */
nouveau_channel_idle(chan->chan);
- ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
+ ret = nvif_client_ioctl(client, &args, sizeof(args));
if (ret)
return nouveau_abi16_put(abi16, ret);
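
The abi16 hunks above replace nouveau_object_new()/nouveau_object_del() with raw nvif ioctls: the request is always a stack struct whose first member is the struct nvif_ioctl_v0 header (owner, type, and the object path stored leaf-first), followed by the method payload, and the whole buffer is handed to nvif_client_ioctl(). A minimal sketch of that shape, using only the types and calls visible in the hunks; "client" is assumed to come from nvif_client(nvif_object(&abi16->device)) as it does above, and the path entries are elided.

	/* Sketch only: the general nvif ioctl shape used by the abi16 code above. */
	struct {
		struct nvif_ioctl_v0     ioctl;   /* header: owner, type, object path */
		struct nvif_ioctl_new_v0 new;     /* NVIF_IOCTL_V0_NEW payload */
		struct nv_dma_v0         ctxdma;  /* class args for NV_DMA_IN_MEMORY */
	} args = {
		.ioctl.owner   = NVIF_IOCTL_V0_OWNER_ANY,
		.ioctl.type    = NVIF_IOCTL_V0_NEW,
		.ioctl.path_nr = 3,               /* .path[2..0]: client/device/channel */
		.new.oclass    = NV_DMA_IN_MEMORY,
		.ctxdma.target = NV_DMA_V0_TARGET_VM,
		.ctxdma.access = NV_DMA_V0_ACCESS_VM,
	};
	int ret = nvif_client_ioctl(client, &args, sizeof(args));

The gpuobj_free hunk uses the same header with NVIF_IOCTL_V0_DEL and a four-entry path, so the object being deleted is simply the innermost path element.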
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 90004081a501..39844e6bfbff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -28,8 +28,7 @@ struct nouveau_abi16_chan {
};
struct nouveau_abi16 {
- struct nouveau_object *client;
- struct nouveau_object *device;
+ struct nvif_device device;
struct list_head channels;
u64 handles;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_agp.c b/drivers/gpu/drm/nouveau/nouveau_agp.c
index 51666daddb94..1f6f6ba6847a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_agp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_agp.c
@@ -1,7 +1,5 @@
#include <linux/module.h>
-#include <core/device.h>
-
#include "nouveau_drm.h"
#include "nouveau_agp.h"
#include "nouveau_reg.h"
@@ -29,7 +27,7 @@ static struct nouveau_agpmode_quirk nouveau_agpmode_quirk_list[] = {
static unsigned long
get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
{
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nouveau_agpmode_quirk *quirk = nouveau_agpmode_quirk_list;
int agpmode = nouveau_agpmode;
unsigned long mode = info->mode;
@@ -38,7 +36,7 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
* FW seems to be broken on nv18, it makes the card lock up
* randomly.
*/
- if (device->chipset == 0x18)
+ if (device->info.chipset == 0x18)
mode &= ~PCI_AGP_COMMAND_FW;
/*
@@ -47,10 +45,10 @@ get_agp_mode(struct nouveau_drm *drm, const struct drm_agp_info *info)
while (agpmode == -1 && quirk->hostbridge_vendor) {
if (info->id_vendor == quirk->hostbridge_vendor &&
info->id_device == quirk->hostbridge_device &&
- device->pdev->vendor == quirk->chip_vendor &&
- device->pdev->device == quirk->chip_device) {
+ nvkm_device(device)->pdev->vendor == quirk->chip_vendor &&
+ nvkm_device(device)->pdev->device == quirk->chip_device) {
agpmode = quirk->mode;
- nv_info(device, "Forcing agp mode to %dX. Use agpmode to override.\n",
+ NV_INFO(drm, "Forcing agp mode to %dX. Use agpmode to override.\n",
agpmode);
break;
}
@@ -104,7 +102,7 @@ void
nouveau_agp_reset(struct nouveau_drm *drm)
{
#if __OS_HAS_AGP
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct drm_device *dev = drm->dev;
u32 save[2];
int ret;
@@ -115,7 +113,7 @@ nouveau_agp_reset(struct nouveau_drm *drm)
/* First of all, disable fast writes, otherwise if it's
* already enabled in the AGP bridge and we disable the card's
* AGP controller we might be locking ourselves out of it. */
- if ((nv_rd32(device, NV04_PBUS_PCI_NV_19) |
+ if ((nvif_rd32(device, NV04_PBUS_PCI_NV_19) |
dev->agp->mode) & PCI_AGP_COMMAND_FW) {
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -134,15 +132,15 @@ nouveau_agp_reset(struct nouveau_drm *drm)
/* clear busmaster bit, and disable AGP */
- save[0] = nv_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
- nv_wr32(device, NV04_PBUS_PCI_NV_19, 0);
+ save[0] = nvif_mask(device, NV04_PBUS_PCI_NV_1, 0x00000004, 0x00000000);
+ nvif_wr32(device, NV04_PBUS_PCI_NV_19, 0);
/* reset PGRAPH, PFIFO and PTIMER */
- save[1] = nv_mask(device, 0x000200, 0x00011100, 0x00000000);
- nv_mask(device, 0x000200, 0x00011100, save[1]);
+ save[1] = nvif_mask(device, 0x000200, 0x00011100, 0x00000000);
+ nvif_mask(device, 0x000200, 0x00011100, save[1]);
/* and restore busmaster bit (gives effect of resetting AGP) */
- nv_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
+ nvif_wr32(device, NV04_PBUS_PCI_NV_1, save[0]);
#endif
}
@@ -150,7 +148,6 @@ void
nouveau_agp_init(struct nouveau_drm *drm)
{
#if __OS_HAS_AGP
- struct nouveau_device *device = nv_device(drm->device);
struct drm_device *dev = drm->dev;
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -162,13 +159,13 @@ nouveau_agp_init(struct nouveau_drm *drm)
ret = drm_agp_acquire(dev);
if (ret) {
- nv_error(device, "unable to acquire AGP: %d\n", ret);
+ NV_ERROR(drm, "unable to acquire AGP: %d\n", ret);
return;
}
ret = drm_agp_info(dev, &info);
if (ret) {
- nv_error(device, "unable to get AGP info: %d\n", ret);
+ NV_ERROR(drm, "unable to get AGP info: %d\n", ret);
return;
}
@@ -177,7 +174,7 @@ nouveau_agp_init(struct nouveau_drm *drm)
ret = drm_agp_enable(dev, mode);
if (ret) {
- nv_error(device, "unable to enable AGP: %d\n", ret);
+ NV_ERROR(drm, "unable to enable AGP: %d\n", ret);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 2c1e4aad7da3..e566c5b53651 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -40,8 +40,8 @@ static int
nv40_get_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
- struct nouveau_device *device = nv_device(drm->device);
- int val = (nv_rd32(device, NV40_PMC_BACKLIGHT) &
+ struct nvif_device *device = &drm->device;
+ int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
NV40_PMC_BACKLIGHT_MASK) >> 16;
return val;
@@ -51,11 +51,11 @@ static int
nv40_set_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int val = bd->props.brightness;
- int reg = nv_rd32(device, NV40_PMC_BACKLIGHT);
+ int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);
- nv_wr32(device, NV40_PMC_BACKLIGHT,
+ nvif_wr32(device, NV40_PMC_BACKLIGHT,
(val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
return 0;
@@ -71,11 +71,11 @@ static int
nv40_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct backlight_properties props;
struct backlight_device *bd;
- if (!(nv_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
+ if (!(nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
return 0;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -97,12 +97,12 @@ nv50_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int or = nv_encoder->or;
u32 div = 1025;
u32 val;
- val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
+ val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
val &= NV50_PDISP_SOR_PWM_CTL_VAL;
return ((val * 100) + (div / 2)) / div;
}
@@ -112,12 +112,12 @@ nv50_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int or = nv_encoder->or;
u32 div = 1025;
u32 val = (bd->props.brightness * div) / 100;
- nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
+ nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
NV50_PDISP_SOR_PWM_CTL_NEW | val);
return 0;
}
@@ -133,12 +133,12 @@ nva3_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int or = nv_encoder->or;
u32 div, val;
- div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
- val = nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
+ div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
+ val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
if (div && div >= val)
return ((val * 100) + (div / 2)) / div;
@@ -151,14 +151,14 @@ nva3_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int or = nv_encoder->or;
u32 div, val;
- div = nv_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
+ div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
val = (bd->props.brightness * div) / 100;
if (div) {
- nv_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
+ nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or), val |
NV50_PDISP_SOR_PWM_CTL_NEW |
NVA3_PDISP_SOR_PWM_CTL_UNK);
return 0;
@@ -177,7 +177,7 @@ static int
nv50_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nouveau_encoder *nv_encoder;
struct backlight_properties props;
struct backlight_device *bd;
@@ -190,12 +190,12 @@ nv50_backlight_init(struct drm_connector *connector)
return -ENODEV;
}
- if (!nv_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
+ if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
return 0;
- if (device->chipset <= 0xa0 ||
- device->chipset == 0xaa ||
- device->chipset == 0xac)
+ if (device->info.chipset <= 0xa0 ||
+ device->info.chipset == 0xaa ||
+ device->info.chipset == 0xac)
ops = &nv50_bl_ops;
else
ops = &nva3_bl_ops;
@@ -218,7 +218,7 @@ int
nouveau_backlight_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -226,13 +226,12 @@ nouveau_backlight_init(struct drm_device *dev)
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
- switch (device->card_type) {
- case NV_40:
+ switch (device->info.family) {
+ case NV_DEVICE_INFO_V0_CURIE:
return nv40_backlight_init(connector);
- case NV_50:
- case NV_C0:
- case NV_D0:
- case NV_E0:
+ case NV_DEVICE_INFO_V0_TESLA:
+ case NV_DEVICE_INFO_V0_FERMI:
+ case NV_DEVICE_INFO_V0_KEPLER:
return nv50_backlight_init(connector);
default:
break;
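
The nouveau_agp.c and nouveau_backlight.c changes are the same mechanical substitution throughout: register access moves from the nouveau_device object (nv_rd32/nv_wr32/nv_mask) to the nvif_device handle (nvif_rd32/nvif_wr32/nvif_mask), and generation checks switch from device->card_type/chipset to device->info.family/chipset. A short read-modify-write sketch of the new accessors, mirroring nv40_set_intensity() above; the helper name is hypothetical, while the register, mask and accessor calls are taken from the hunks.

	/* Sketch: read-modify-write via the nvif accessors, as in nv40_set_intensity(). */
	static void example_set_backlight(struct nvif_device *device, u32 val)
	{
		u32 reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);       /* read */
		reg = (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK);  /* modify */
		nvif_wr32(device, NV40_PMC_BACKLIGHT, reg);            /* write back */
	}

nvif_mask() collapses the same sequence into a single call and hands back the previous value, which is why the AGP reset path above can save register state with it and restore it later with nvif_wr32().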
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 8268a4ccac15..dae2c96deef8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -22,8 +22,6 @@
* SOFTWARE.
*/
-#include <subdev/bios.h>
-
#include <drm/drmP.h>
#include "nouveau_drm.h"
@@ -217,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nvbios *bios = &drm->vbios;
uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
uint32_t sel_clk_binding, sel_clk;
@@ -240,7 +238,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
NV_INFO(drm, "Calling LVDS script %d:\n", script);
/* don't let script change pll->head binding */
- sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
+ sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
if (lvds_ver < 0x30)
ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
@@ -252,7 +250,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
/* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
- nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);
+ nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
return ret;
}
@@ -320,7 +318,7 @@ static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct n
static int
get_fp_strap(struct drm_device *dev, struct nvbios *bios)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
/*
* The fp strap is normally dictated by the "User Strap" in
@@ -334,10 +332,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
- if (device->card_type >= NV_50)
- return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
+ return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
- return (nv_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
+ return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
}
static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
@@ -636,7 +634,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nvbios *bios = &drm->vbios;
int cv = bios->chip_version;
uint16_t clktable = 0, scriptptr;
@@ -670,7 +668,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
}
/* don't let script change pll->head binding */
- sel_clk_binding = nv_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
+ sel_clk_binding = nvif_rd32(device, NV_PRAMDAC_SEL_CLK) & 0x50000;
run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
@@ -1253,7 +1251,7 @@ olddcb_table(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
u8 *dcb = NULL;
- if (nv_device(drm->device)->card_type > NV_04)
+ if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT)
dcb = ROMPTR(dev, drm->vbios.data[0x36]);
if (!dcb) {
NV_WARN(drm, "No DCB data found in VBIOS\n");
@@ -1399,6 +1397,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_output *entry)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ int link = 0;
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
@@ -1444,6 +1443,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
if (conf & 0x4)
entry->lvdsconf.use_power_scripts = true;
entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4;
+ link = entry->lvdsconf.sor.link;
}
if (conf & mask) {
/*
@@ -1492,17 +1492,18 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->dpconf.link_nr = 1;
break;
}
+ link = entry->dpconf.sor.link;
break;
case DCB_OUTPUT_TMDS:
if (dcb->version >= 0x40) {
entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
entry->extdev = (conf & 0x0000ff00) >> 8;
+ link = entry->tmdsconf.sor.link;
}
else if (dcb->version >= 0x30)
entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
else if (dcb->version >= 0x22)
entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
-
break;
case DCB_OUTPUT_EOL:
/* weird g80 mobile type that "nv" treats as a terminator */
@@ -1526,6 +1527,8 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
if (conf & 0x100000)
entry->i2c_upper_default = true;
+ entry->hasht = (entry->location << 4) | entry->type;
+ entry->hashm = (entry->heads << 8) | (link << 6) | entry->or;
return true;
}
@@ -1908,7 +1911,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
*/
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
uint8_t bytes_to_write;
uint16_t hwsq_entry_offset;
int i;
@@ -1931,15 +1934,15 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
/* set sequencer control */
- nv_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
+ nvif_wr32(device, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
bytes_to_write -= 4;
/* write ucode */
for (i = 0; i < bytes_to_write; i += 4)
- nv_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
+ nvif_wr32(device, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
/* twiddle NV_PBUS_DEBUG_4 */
- nv_wr32(device, NV_PBUS_DEBUG_4, nv_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
+ nvif_wr32(device, NV_PBUS_DEBUG_4, nvif_rd32(device, NV_PBUS_DEBUG_4) | 0x18);
return 0;
}
@@ -2002,7 +2005,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bios *bios = nouveau_bios(drm->device);
+ struct nouveau_bios *bios = nvkm_bios(&drm->device);
struct nvbios *legacy = &drm->vbios;
memset(legacy, 0, sizeof(struct nvbios));
@@ -2054,7 +2057,7 @@ nouveau_bios_posted(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
unsigned htotal;
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return true;
htotal = NVReadVgaCrtc(dev, 0, 0x06);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b6dc85c614be..da5d631aa5b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,13 +27,9 @@
* Jeremy Kolb <jkolb@brandeis.edu>
*/
-#include <core/engine.h>
+#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/bar.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -52,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct nouveau_fb_tile *tile = &pfb->tile.region[i];
struct nouveau_engine *engine;
@@ -109,7 +105,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 flags)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;
@@ -153,23 +149,23 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int *align, int *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
- if (device->card_type < NV_50) {
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->tile_mode) {
- if (device->chipset >= 0x40) {
+ if (device->info.chipset >= 0x40) {
*align = 65536;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (device->chipset >= 0x30) {
+ } else if (device->info.chipset >= 0x30) {
*align = 32768;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (device->chipset >= 0x20) {
+ } else if (device->info.chipset >= 0x20) {
*align = 16384;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (device->chipset >= 0x10) {
+ } else if (device->info.chipset >= 0x10) {
*align = 16384;
*size = roundup(*size, 32 * nvbo->tile_mode);
}
@@ -196,12 +192,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
int lpg_shift = 12;
int max_size;
- if (drm->client.base.vm)
- lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+ if (drm->client.vm)
+ lpg_shift = drm->client.vm->vmm->lpg_shift;
max_size = INT_MAX & ~((1 << lpg_shift) - 1);
if (size <= 0 || size > max_size) {
- nv_warn(drm, "skipped size %x\n", (u32)size);
+ NV_WARN(drm, "skipped size %x\n", (u32)size);
return -EINVAL;
}
@@ -219,9 +215,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->bo.bdev = &drm->ttm.bdev;
nvbo->page_shift = 12;
- if (drm->client.base.vm) {
+ if (drm->client.vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
- nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
+ nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
}
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -261,11 +257,9 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
- u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
+ u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
- if ((nv_device(drm->device)->card_type == NV_10 ||
- nv_device(drm->device)->card_type == NV_11) &&
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
@@ -309,7 +303,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
+ ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
goto out;
@@ -350,7 +344,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret, ref;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
+ ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
return ret;
@@ -385,7 +379,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
{
int ret;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return ret;
@@ -500,21 +494,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
- if (nv_device(drm->device)->card_type >= NV_50) {
+ man->flags = TTM_MEMTYPE_FLAG_FIXED |
+ TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ /* Some BARs do not support being ioremapped WC */
+ if (nvkm_bar(&drm->device)->iomap_uncached) {
+ man->available_caching = TTM_PL_FLAG_UNCACHED;
+ man->default_caching = TTM_PL_FLAG_UNCACHED;
+ }
+
man->func = &nouveau_vram_manager;
man->io_reserve_fastpath = false;
man->use_io_reserve_lru = true;
} else {
man->func = &ttm_bo_manager_func;
}
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
man->func = &nouveau_gart_manager;
else
if (drm->agp.stat != ENABLED)
@@ -763,9 +764,9 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
- OUT_RING (chan, NvNotify0);
- OUT_RING (chan, NvDmaFB);
- OUT_RING (chan, NvDmaFB);
+ OUT_RING (chan, chan->drm->ntfy.handle);
+ OUT_RING (chan, chan->vram.handle);
+ OUT_RING (chan, chan->vram.handle);
}
return ret;
@@ -852,7 +853,7 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
OUT_RING (chan, handle);
BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
- OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, chan->drm->ntfy.handle);
}
return ret;
@@ -864,7 +865,7 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
{
if (mem->mem_type == TTM_PL_TT)
return NvDmaTT;
- return NvDmaFB;
+ return chan->vram.handle;
}
static int
@@ -922,12 +923,12 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
u64 size = (u64)mem->num_pages << PAGE_SHIFT;
int ret;
- ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+ ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
NV_MEM_ACCESS_RW, &old_node->vma[0]);
if (ret)
return ret;
- ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+ ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
NV_MEM_ACCESS_RW, &old_node->vma[1]);
if (ret) {
nouveau_vm_put(&old_node->vma[0]);
@@ -945,6 +946,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
+ struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
struct nouveau_fence *fence;
int ret;
@@ -952,13 +954,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
* old nouveau_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_move_prep(drm, bo, new_mem);
if (ret)
return ret;
}
- mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(bo->sync_obj, chan);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
@@ -973,7 +975,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
}
}
}
- mutex_unlock(&chan->cli->mutex);
+ mutex_unlock(&cli->mutex);
return ret;
}
@@ -1005,9 +1007,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
int ret;
do {
- struct nouveau_object *object;
struct nouveau_channel *chan;
- u32 handle = (mthd->engine << 16) | mthd->oclass;
if (mthd->engine)
chan = drm->cechan;
@@ -1016,13 +1016,14 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
if (chan == NULL)
continue;
- ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
- mthd->oclass, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL,
+ mthd->oclass | (mthd->engine << 16),
+ mthd->oclass, NULL, 0,
+ &drm->ttm.copy);
if (ret == 0) {
- ret = mthd->init(chan, handle);
+ ret = mthd->init(chan, drm->ttm.copy.handle);
if (ret) {
- nouveau_object_del(nv_object(drm),
- chan->handle, handle);
+ nvif_object_fini(&drm->ttm.copy);
continue;
}
@@ -1135,7 +1136,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
- if (nv_device(drm->device)->card_type >= NV_10) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@@ -1166,7 +1167,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
return ret;
@@ -1203,7 +1204,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
@@ -1249,16 +1250,16 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
- if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
/* untiled */
break;
/* fallthrough, tiled memory */
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1);
+ mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
mem->bus.is_iomem = true;
- if (nv_device(drm->device)->card_type >= NV_50) {
- struct nouveau_bar *bar = nouveau_bar(drm->device);
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ struct nouveau_bar *bar = nvkm_bar(&drm->device);
ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
&node->bar_vma);
@@ -1278,7 +1279,7 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
- struct nouveau_bar *bar = nouveau_bar(drm->device);
+ struct nouveau_bar *bar = nvkm_bar(&drm->device);
struct nouveau_mem *node = mem->mm_node;
if (!node->bar_vma.node)
@@ -1292,15 +1293,15 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_device *device = nv_device(drm->device);
- u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
+ struct nvif_device *device = &drm->device;
+ u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
int ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
- if (nv_device(drm->device)->card_type < NV_50 ||
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nouveau_bo_tile_layout(nvbo))
return 0;
@@ -1315,7 +1316,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}
/* make sure bo is in mappable vram */
- if (nv_device(drm->device)->card_type >= NV_50 ||
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
bo->mem.start + bo->mem.num_pages < mappable)
return 0;
@@ -1333,6 +1334,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
struct nouveau_drm *drm;
struct nouveau_device *device;
struct drm_device *dev;
+ struct device *pdev;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1349,8 +1351,9 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
drm = nouveau_bdev(ttm->bdev);
- device = nv_device(drm->device);
+ device = nvkm_device(&drm->device);
dev = drm->dev;
+ pdev = nv_device_base(device);
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
@@ -1370,17 +1373,22 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
for (i = 0; i < ttm->num_pages; i++) {
- ttm_dma->dma_address[i] = nv_device_map_page(device,
- ttm->pages[i]);
- if (!ttm_dma->dma_address[i]) {
+ dma_addr_t addr;
+
+ addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(pdev, addr)) {
while (--i) {
- nv_device_unmap_page(device,
- ttm_dma->dma_address[i]);
+ dma_unmap_page(pdev, ttm_dma->dma_address[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
ttm_dma->dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
return -EFAULT;
}
+
+ ttm_dma->dma_address[i] = addr;
}
return 0;
}
@@ -1392,6 +1400,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
struct nouveau_drm *drm;
struct nouveau_device *device;
struct drm_device *dev;
+ struct device *pdev;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1399,8 +1408,9 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;
drm = nouveau_bdev(ttm->bdev);
- device = nv_device(drm->device);
+ device = nvkm_device(&drm->device);
dev = drm->dev;
+ pdev = nv_device_base(device);
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
@@ -1418,7 +1428,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (ttm_dma->dma_address[i]) {
- nv_device_unmap_page(device, ttm_dma->dma_address[i]);
+ dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
}
}
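
The ttm_tt populate/unpopulate hunks drop the driver-private nv_device_map_page()/nv_device_unmap_page() wrappers and call the generic DMA API against the struct device directly: map each page, test the result with dma_mapping_error(), and unwind anything already mapped before failing. A minimal sketch of that pattern under the same assumptions; the helper name and the pages/dma/n parameters are placeholders, and kernel context is assumed for PAGE_SIZE and -EFAULT.

	/* Sketch: map a page array with unwind-on-error, as the populate hunk does. */
	#include <linux/dma-mapping.h>

	static int example_map_pages(struct device *pdev, struct page **pages,
				     dma_addr_t *dma, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			dma[i] = dma_map_page(pdev, pages[i], 0, PAGE_SIZE,
					      DMA_BIDIRECTIONAL);
			if (dma_mapping_error(pdev, dma[i])) {
				while (i--)	/* release what was already mapped */
					dma_unmap_page(pdev, dma[i], PAGE_SIZE,
						       DMA_BIDIRECTIONAL);
				return -EFAULT;
			}
		}
		return 0;
	}

Worth noting: the populate hunk above unwinds with while (--i), which stops before reaching index 0, so the first page's mapping is never released on the error path; the while (i--) form in the sketch covers it.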
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index ccb6b452d6d0..99cd9e4a2aa6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -22,16 +22,11 @@
* Authors: Ben Skeggs
*/
-#include <core/object.h>
-#include <core/client.h>
-#include <core/device.h>
-#include <core/class.h>
-
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/instmem.h>
+#include <nvif/os.h>
+#include <nvif/class.h>
-#include <engine/software.h>
+/*XXX*/
+#include <core/client.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -47,7 +42,7 @@ module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
- struct nouveau_cli *cli = chan->cli;
+ struct nouveau_cli *cli = (void *)nvif_client(chan->object);
struct nouveau_fence *fence = NULL;
int ret;
@@ -58,8 +53,8 @@ nouveau_channel_idle(struct nouveau_channel *chan)
}
if (ret)
- NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n",
- chan->handle, cli->base.name);
+ NV_PRINTK(error, cli, "failed to idle channel 0x%08x [%s]\n",
+ chan->object->handle, nvkm_client(&cli->base)->name);
return ret;
}
@@ -68,36 +63,34 @@ nouveau_channel_del(struct nouveau_channel **pchan)
{
struct nouveau_channel *chan = *pchan;
if (chan) {
- struct nouveau_object *client = nv_object(chan->cli);
if (chan->fence) {
nouveau_channel_idle(chan);
nouveau_fence(chan->drm)->context_del(chan);
}
- nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
- nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
+ nvif_object_fini(&chan->nvsw);
+ nvif_object_fini(&chan->gart);
+ nvif_object_fini(&chan->vram);
+ nvif_object_ref(NULL, &chan->object);
+ nvif_object_fini(&chan->push.ctxdma);
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
if (chan->push.buffer && chan->push.buffer->pin_refcnt)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
+ nvif_device_ref(NULL, &chan->device);
kfree(chan);
}
*pchan = NULL;
}
static int
-nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, u32 size,
- struct nouveau_channel **pchan)
+nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
+ u32 handle, u32 size, struct nouveau_channel **pchan)
{
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_instmem *imem = nouveau_instmem(device);
- struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
- struct nouveau_client *client = &cli->base;
- struct nv_dma_class args = {};
+ struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+ struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
+ struct nv_dma_v0 args = {};
struct nouveau_channel *chan;
- struct nouveau_object *push;
u32 target;
int ret;
@@ -105,9 +98,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
if (!chan)
return -ENOMEM;
- chan->cli = cli;
+ nvif_device_ref(device, &chan->device);
chan->drm = drm;
- chan->handle = handle;
/* allocate memory for dma push buffer */
target = TTM_PL_FLAG_TT;
@@ -132,51 +124,54 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
* we be able to call out to other (indirect) push buffers
*/
chan->push.vma.offset = chan->push.buffer->bo.offset;
- chan->push.handle = NVDRM_PUSH | (handle & 0xffff);
- if (device->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
&chan->push.vma);
if (ret) {
nouveau_channel_del(pchan);
return ret;
}
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = client->vm->vmm->limit - 1;
+ args.limit = cli->vm->vmm->limit - 1;
} else
if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
- u64 limit = pfb->ram->size - imem->reserved - 1;
- if (device->card_type == NV_04) {
+ if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access..
* nfi why this exists, it came from the -nv ddx.
*/
- args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
- args.start = nv_device_resource_start(device, 1);
- args.limit = args.start + limit;
+ args.target = NV_DMA_V0_TARGET_PCI;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
+ args.start = nv_device_resource_start(nvkm_device(device), 1);
+ args.limit = args.start + device->info.ram_user - 1;
} else {
- args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_VRAM;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = limit;
+ args.limit = device->info.ram_user - 1;
}
} else {
if (chan->drm->agp.stat == ENABLED) {
- args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_AGP;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
args.limit = chan->drm->agp.base +
chan->drm->agp.size - 1;
} else {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = vmm->limit - 1;
}
}
- ret = nouveau_object_new(nv_object(chan->cli), parent,
- chan->push.handle, 0x0002,
- &args, sizeof(args), &push);
+ ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
+ (handle & 0xffff), NV_DMA_FROM_MEMORY,
+ &args, sizeof(args), &chan->push.ctxdma);
if (ret) {
nouveau_channel_del(pchan);
return ret;
@@ -186,38 +181,56 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
}
static int
-nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, u32 engine,
- struct nouveau_channel **pchan)
+nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
+ u32 handle, u32 engine, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { NVE0_CHANNEL_IND_CLASS,
- NVC0_CHANNEL_IND_CLASS,
- NV84_CHANNEL_IND_CLASS,
- NV50_CHANNEL_IND_CLASS,
+ static const u16 oclasses[] = { KEPLER_CHANNEL_GPFIFO_A,
+ FERMI_CHANNEL_GPFIFO,
+ G82_CHANNEL_GPFIFO,
+ NV50_CHANNEL_GPFIFO,
0 };
const u16 *oclass = oclasses;
- struct nve0_channel_ind_class args;
+ union {
+ struct nv50_channel_gpfifo_v0 nv50;
+ struct kepler_channel_gpfifo_a_v0 kepler;
+ } args, *retn;
struct nouveau_channel *chan;
+ u32 size;
int ret;
/* allocate dma push buffer */
- ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
+ ret = nouveau_channel_prep(drm, device, handle, 0x12000, &chan);
*pchan = chan;
if (ret)
return ret;
/* create channel object */
- args.pushbuf = chan->push.handle;
- args.ioffset = 0x10000 + chan->push.vma.offset;
- args.ilength = 0x02000;
- args.engine = engine;
-
do {
- ret = nouveau_object_new(nv_object(cli), parent, handle,
- *oclass++, &args, sizeof(args),
- &chan->object);
- if (ret == 0)
+ if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
+ args.kepler.version = 0;
+ args.kepler.engine = engine;
+ args.kepler.pushbuf = chan->push.ctxdma.handle;
+ args.kepler.ilength = 0x02000;
+ args.kepler.ioffset = 0x10000 + chan->push.vma.offset;
+ size = sizeof(args.kepler);
+ } else {
+ args.nv50.version = 0;
+ args.nv50.pushbuf = chan->push.ctxdma.handle;
+ args.nv50.ilength = 0x02000;
+ args.nv50.ioffset = 0x10000 + chan->push.vma.offset;
+ size = sizeof(args.nv50);
+ }
+
+ ret = nvif_object_new(nvif_object(device), handle, *oclass++,
+ &args, size, &chan->object);
+ if (ret == 0) {
+ retn = chan->object->data;
+ if (chan->object->oclass >= KEPLER_CHANNEL_GPFIFO_A)
+ chan->chid = retn->kepler.chid;
+ else
+ chan->chid = retn->nv50.chid;
return ret;
+ }
} while (*oclass);
nouveau_channel_del(pchan);
@@ -225,35 +238,38 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
}
static int
-nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, struct nouveau_channel **pchan)
+nouveau_channel_dma(struct nouveau_drm *drm, struct nvif_device *device,
+ u32 handle, struct nouveau_channel **pchan)
{
- static const u16 oclasses[] = { NV40_CHANNEL_DMA_CLASS,
- NV17_CHANNEL_DMA_CLASS,
- NV10_CHANNEL_DMA_CLASS,
- NV03_CHANNEL_DMA_CLASS,
+ static const u16 oclasses[] = { NV40_CHANNEL_DMA,
+ NV17_CHANNEL_DMA,
+ NV10_CHANNEL_DMA,
+ NV03_CHANNEL_DMA,
0 };
const u16 *oclass = oclasses;
- struct nv03_channel_dma_class args;
+ struct nv03_channel_dma_v0 args, *retn;
struct nouveau_channel *chan;
int ret;
/* allocate dma push buffer */
- ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
+ ret = nouveau_channel_prep(drm, device, handle, 0x10000, &chan);
*pchan = chan;
if (ret)
return ret;
/* create channel object */
- args.pushbuf = chan->push.handle;
+ args.version = 0;
+ args.pushbuf = chan->push.ctxdma.handle;
args.offset = chan->push.vma.offset;
do {
- ret = nouveau_object_new(nv_object(cli), parent, handle,
- *oclass++, &args, sizeof(args),
- &chan->object);
- if (ret == 0)
+ ret = nvif_object_new(nvif_object(device), handle, *oclass++,
+ &args, sizeof(args), &chan->object);
+ if (ret == 0) {
+ retn = chan->object->data;
+ chan->chid = retn->chid;
return ret;
+ }
} while (ret && *oclass);
nouveau_channel_del(pchan);
@@ -263,60 +279,63 @@ nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
- struct nouveau_client *client = nv_client(chan->cli);
- struct nouveau_device *device = nv_device(chan->drm->device);
- struct nouveau_instmem *imem = nouveau_instmem(device);
- struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
- struct nouveau_fb *pfb = nouveau_fb(device);
+ struct nvif_device *device = chan->device;
+ struct nouveau_cli *cli = (void *)nvif_client(&device->base);
+ struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
struct nouveau_software_chan *swch;
- struct nouveau_object *object;
- struct nv_dma_class args = {};
+ struct nv_dma_v0 args = {};
int ret, i;
+ nvif_object_map(chan->object);
+
/* allocate dma objects to cover all allowed vram, and gart */
- if (device->card_type < NV_C0) {
- if (device->card_type >= NV_50) {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = client->vm->vmm->limit - 1;
+ args.limit = cli->vm->vmm->limit - 1;
} else {
- args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_VRAM;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
- args.limit = pfb->ram->size - imem->reserved - 1;
+ args.limit = device->info.ram_user - 1;
}
- ret = nouveau_object_new(nv_object(client), chan->handle, vram,
- 0x003d, &args, sizeof(args), &object);
+ ret = nvif_object_init(chan->object, NULL, vram,
+ NV_DMA_IN_MEMORY, &args,
+ sizeof(args), &chan->vram);
if (ret)
return ret;
- if (device->card_type >= NV_50) {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_VM;
args.start = 0;
- args.limit = client->vm->vmm->limit - 1;
+ args.limit = cli->vm->vmm->limit - 1;
} else
if (chan->drm->agp.stat == ENABLED) {
- args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_AGP;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = chan->drm->agp.base;
args.limit = chan->drm->agp.base +
chan->drm->agp.size - 1;
} else {
- args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+ args.target = NV_DMA_V0_TARGET_VM;
+ args.access = NV_DMA_V0_ACCESS_RDWR;
args.start = 0;
args.limit = vmm->limit - 1;
}
- ret = nouveau_object_new(nv_object(client), chan->handle, gart,
- 0x003d, &args, sizeof(args), &object);
+ ret = nvif_object_init(chan->object, NULL, gart,
+ NV_DMA_IN_MEMORY, &args,
+ sizeof(args), &chan->gart);
if (ret)
return ret;
-
- chan->vram = vram;
- chan->gart = gart;
}
/* initialise dma tracking parameters */
- switch (nv_hclass(chan->object) & 0x00ff) {
+ switch (chan->object->oclass & 0x00ff) {
case 0x006b:
case 0x006e:
chan->user_put = 0x40;
@@ -347,13 +366,13 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
OUT_RING(chan, 0x00000000);
/* allocate software object class (used for fences on <= nv05) */
- if (device->card_type < NV_10) {
- ret = nouveau_object_new(nv_object(client), chan->handle,
- NvSw, 0x006e, NULL, 0, &object);
+ if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
+ ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
+ NULL, 0, &chan->nvsw);
if (ret)
return ret;
- swch = (void *)object->parent;
+ swch = (void *)nvkm_object(&chan->nvsw)->parent;
swch->flip = nouveau_flip_complete;
swch->flip_data = chan;
@@ -362,7 +381,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
return ret;
BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
- OUT_RING (chan, NvSw);
+ OUT_RING (chan, chan->nvsw.handle);
FIRE_RING (chan);
}
@@ -371,25 +390,26 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
}
int
-nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
- u32 parent, u32 handle, u32 arg0, u32 arg1,
+nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+ u32 handle, u32 arg0, u32 arg1,
struct nouveau_channel **pchan)
{
+ struct nouveau_cli *cli = (void *)nvif_client(&device->base);
int ret;
- ret = nouveau_channel_ind(drm, cli, parent, handle, arg0, pchan);
+ ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
if (ret) {
- NV_DEBUG(cli, "ib channel create, %d\n", ret);
- ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
+ NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
+ ret = nouveau_channel_dma(drm, device, handle, pchan);
if (ret) {
- NV_DEBUG(cli, "dma channel create, %d\n", ret);
+ NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
return ret;
}
}
ret = nouveau_channel_init(*pchan, arg0, arg1);
if (ret) {
- NV_ERROR(cli, "channel failed to initialise, %d\n", ret);
+ NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
nouveau_channel_del(pchan);
return ret;
}
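
Channel creation keeps its probe-newest-first strategy, but now goes through nvif_object_new(): walk a zero-terminated list of class ids, fill in the version-0 argument struct for whichever generation is being tried, and on success pull the channel id back out of the object's reply data. A condensed sketch of the DMA-channel variant, using only what the hunk above shows; device, handle and chan are assumed to be in scope as they are there, and the pushbuf/offset fields are elided.

	/* Sketch: class-probing loop, condensed from nouveau_channel_dma() above. */
	static const u16 oclasses[] = { NV40_CHANNEL_DMA, NV17_CHANNEL_DMA,
					NV10_CHANNEL_DMA, NV03_CHANNEL_DMA, 0 };
	const u16 *oclass = oclasses;
	struct nv03_channel_dma_v0 args = { .version = 0 }, *retn;
	int ret;

	/* the real code also fills args.pushbuf and args.offset before probing */
	do {
		ret = nvif_object_new(nvif_object(device), handle, *oclass++,
				      &args, sizeof(args), &chan->object);
		if (ret == 0) {
			retn = chan->object->data;	/* reply written back here */
			chan->chid = retn->chid;	/* channel id from the reply */
			break;
		}
	} while (*oclass);				/* list is zero-terminated */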
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 40f97e2c47b6..20163709d608 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -1,20 +1,23 @@
#ifndef __NOUVEAU_CHAN_H__
#define __NOUVEAU_CHAN_H__
-struct nouveau_cli;
+#include <nvif/object.h>
+struct nvif_device;
struct nouveau_channel {
- struct nouveau_cli *cli;
+ struct nvif_device *device;
struct nouveau_drm *drm;
- u32 handle;
- u32 vram;
- u32 gart;
+ int chid;
+
+ struct nvif_object vram;
+ struct nvif_object gart;
+ struct nvif_object nvsw;
struct {
struct nouveau_bo *buffer;
struct nouveau_vma vma;
- u32 handle;
+ struct nvif_object ctxdma;
} push;
/* TODO: this will be reworked in the near future */
@@ -34,12 +37,12 @@ struct nouveau_channel {
u32 user_get;
u32 user_put;
- struct nouveau_object *object;
+ struct nvif_object *object;
};
-int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
- u32 parent, u32 handle, u32 arg0, u32 arg1,
+int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
+ u32 handle, u32 arg0, u32 arg1,
struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1fa222e8f007..1ec44c83e919 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -42,9 +42,7 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
-#include <subdev/i2c.h>
-#include <subdev/gpio.h>
-#include <engine/disp.h>
+#include <nvif/event.h>
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
static int nouveau_tv_disable = 0;
@@ -63,7 +61,7 @@ find_encoder(struct drm_connector *connector, int type)
{
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
- struct drm_mode_object *obj;
+ struct drm_encoder *enc;
int i, id;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
@@ -71,10 +69,10 @@ find_encoder(struct drm_connector *connector, int type)
if (!id)
break;
- obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ enc = drm_encoder_find(dev, id);
+ if (!enc)
continue;
- nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+ nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
(nv_encoder->dcb && nv_encoder->dcb->type == type))
@@ -102,9 +100,9 @@ static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
- nouveau_event_ref(NULL, &nv_connector->hpd);
+ nvif_notify_fini(&nv_connector->hpd);
kfree(nv_connector->edid);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
if (nv_connector->aux.transfer)
drm_dp_aux_unregister(&nv_connector->aux);
@@ -117,9 +115,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
+ struct nouveau_gpio *gpio = nvkm_gpio(&drm->device);
struct nouveau_encoder *nv_encoder;
- struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
int i, panel = -ENODEV;
/* eDP panels need powering on by us (if the VBIOS doesn't default it
@@ -139,10 +137,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (id == 0)
break;
- obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(dev, id);
+ if (!encoder)
continue;
- nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+ nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
int ret = nouveau_dp_detect(nv_encoder);
@@ -206,7 +204,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
return;
nv_connector->detected_encoder = nv_encoder;
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
} else
@@ -216,9 +214,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = false;
} else {
connector->doublescan_allowed = true;
- if (nv_device(drm->device)->card_type == NV_20 ||
- ((nv_device(drm->device)->card_type == NV_10 ||
- nv_device(drm->device)->card_type == NV_11) &&
+ if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
+ (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
(dev->pdev->device & 0x0ff0) != 0x0100 &&
(dev->pdev->device & 0x0ff0) != 0x0150))
/* HW is broken */
@@ -802,11 +799,11 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
if (dcb->location != DCB_LOC_ON_CHIP ||
- nv_device(drm->device)->chipset >= 0x46)
+ drm->device.info.chipset >= 0x46)
return 165000;
- else if (nv_device(drm->device)->chipset >= 0x40)
+ else if (drm->device.info.chipset >= 0x40)
return 155000;
- else if (nv_device(drm->device)->chipset >= 0x18)
+ else if (drm->device.info.chipset >= 0x18)
return 135000;
else
return 112000;
@@ -939,18 +936,19 @@ nouveau_connector_funcs_dp = {
.force = nouveau_connector_force
};
-static void
-nouveau_connector_hotplug_work(struct work_struct *work)
+static int
+nouveau_connector_hotplug(struct nvif_notify *notify)
{
struct nouveau_connector *nv_connector =
- container_of(work, typeof(*nv_connector), work);
+ container_of(notify, typeof(*nv_connector), hpd);
struct drm_connector *connector = &nv_connector->base;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ const struct nvif_notify_conn_rep_v0 *rep = notify->data;
const char *name = connector->name;
- if (nv_connector->status & NVKM_HPD_IRQ) {
+ if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
} else {
- bool plugged = (nv_connector->status != NVKM_HPD_UNPLUG);
+ bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
@@ -961,16 +959,7 @@ nouveau_connector_hotplug_work(struct work_struct *work)
drm_helper_hpd_irq_event(connector->dev);
}
- nouveau_event_get(nv_connector->hpd);
-}
-
-static int
-nouveau_connector_hotplug(void *data, u32 type, int index)
-{
- struct nouveau_connector *nv_connector = data;
- nv_connector->status = type;
- schedule_work(&nv_connector->work);
- return NVKM_EVENT_DROP;
+ return NVIF_NOTIFY_KEEP;
}
static ssize_t
@@ -1040,7 +1029,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
struct drm_connector *connector;
int type, ret = 0;
bool dummy;
@@ -1194,7 +1182,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
@@ -1226,16 +1214,20 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
- ret = nouveau_event_new(pdisp->hpd, NVKM_HPD, index,
- nouveau_connector_hotplug,
- nv_connector, &nv_connector->hpd);
+ ret = nvif_notify_init(&disp->disp, NULL, nouveau_connector_hotplug,
+ true, NV04_DISP_NTFY_CONN,
+ &(struct nvif_notify_conn_req_v0) {
+ .mask = NVIF_NOTIFY_CONN_V0_ANY,
+ .conn = index,
+ },
+ sizeof(struct nvif_notify_conn_req_v0),
+ sizeof(struct nvif_notify_conn_rep_v0),
+ &nv_connector->hpd);
if (ret)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
else
connector->polled = DRM_CONNECTOR_POLL_HPD;
- INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work);
-
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
}
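
Hotplug handling is also reworked here: the nouveau_eventh plus work_struct pair becomes a single struct nvif_notify, the handler runs directly from the notify machinery and returns NVIF_NOTIFY_KEEP to stay armed, and registration passes the request and reply structures together with their sizes to nvif_notify_init(). A condensed sketch of the registration path shown above; example_hotplug and the plugged local are illustrative, everything else appears in the hunk.

	/* Sketch: nvif_notify-based hotplug, condensed from the hunk above. */
	static int example_hotplug(struct nvif_notify *notify)
	{
		const struct nvif_notify_conn_rep_v0 *rep = notify->data; /* reply payload */
		bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);

		/* ...update the connector and kick drm_helper_hpd_irq_event() here... */
		(void)plugged;
		return NVIF_NOTIFY_KEEP;	/* keep the notifier armed */
	}

	ret = nvif_notify_init(&disp->disp, NULL, example_hotplug, true,
			       NV04_DISP_NTFY_CONN,
			       &(struct nvif_notify_conn_req_v0) {
					.mask = NVIF_NOTIFY_CONN_V0_ANY,
					.conn = index,
			       },
			       sizeof(struct nvif_notify_conn_req_v0),
			       sizeof(struct nvif_notify_conn_rep_v0),
			       &nv_connector->hpd);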
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 8861b6c579ad..68029d041dd2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -27,14 +27,12 @@
#ifndef __NOUVEAU_CONNECTOR_H__
#define __NOUVEAU_CONNECTOR_H__
+#include <nvif/notify.h>
+
#include <drm/drm_edid.h>
#include <drm/drm_dp_helper.h>
#include "nouveau_crtc.h"
-#include <core/event.h>
-
-#include <subdev/bios.h>
-
struct nouveau_i2c_port;
enum nouveau_underscan_type {
@@ -67,9 +65,7 @@ struct nouveau_connector {
u8 index;
u8 *dcb;
- struct nouveau_eventh *hpd;
- u32 status;
- struct work_struct work;
+ struct nvif_notify hpd;
struct drm_dp_aux aux;
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index a0534489d23f..f19cb1c5fc5a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -27,10 +27,13 @@
#ifndef __NOUVEAU_CRTC_H__
#define __NOUVEAU_CRTC_H__
+#include <nvif/notify.h>
+
struct nouveau_crtc {
struct drm_crtc base;
int index;
+ struct nvif_notify vblank;
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
@@ -46,7 +49,7 @@ struct nouveau_crtc {
int cpp;
bool blanked;
uint32_t offset;
- uint32_t tile_flags;
+ uint32_t handle;
} fb;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 47ad74255bf1..1cc7b603c753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -27,6 +27,8 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <nvif/class.h>
+
#include "nouveau_fbcon.h"
#include "dispnv04/hw.h"
#include "nouveau_crtc.h"
@@ -37,35 +39,42 @@
#include "nouveau_fence.h"
-#include <engine/disp.h>
-
-#include <core/class.h>
+#include <nvif/event.h>
static int
-nouveau_display_vblank_handler(void *data, u32 type, int head)
+nouveau_display_vblank_handler(struct nvif_notify *notify)
{
- struct nouveau_drm *drm = data;
- drm_handle_vblank(drm->dev, head);
- return NVKM_EVENT_KEEP;
+ struct nouveau_crtc *nv_crtc =
+ container_of(notify, typeof(*nv_crtc), vblank);
+ drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
+ return NVIF_NOTIFY_KEEP;
}
int
nouveau_display_vblank_enable(struct drm_device *dev, int head)
{
- struct nouveau_display *disp = nouveau_display(dev);
- if (disp) {
- nouveau_event_get(disp->vblank[head]);
- return 0;
+ struct drm_crtc *crtc;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (nv_crtc->index == head) {
+ nvif_notify_get(&nv_crtc->vblank);
+ return 0;
+ }
}
- return -EIO;
+ return -EINVAL;
}
void
nouveau_display_vblank_disable(struct drm_device *dev, int head)
{
- struct nouveau_display *disp = nouveau_display(dev);
- if (disp)
- nouveau_event_put(disp->vblank[head]);
+ struct drm_crtc *crtc;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (nv_crtc->index == head) {
+ nvif_notify_put(&nv_crtc->vblank);
+ return;
+ }
+ }
}
static inline int
@@ -86,17 +95,22 @@ int
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
- const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index;
+ struct {
+ struct nv04_disp_mthd_v0 base;
+ struct nv04_disp_scanoutpos_v0 scan;
+ } args = {
+ .base.method = NV04_DISP_SCANOUTPOS,
+ .base.head = nouveau_crtc(crtc)->index,
+ };
struct nouveau_display *disp = nouveau_display(crtc->dev);
- struct nv04_display_scanoutpos args;
int ret, retry = 1;
do {
- ret = nv_exec(disp->core, mthd, &args, sizeof(args));
+ ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
if (ret != 0)
return 0;
- if (args.vline) {
+ if (args.scan.vline) {
ret |= DRM_SCANOUTPOS_ACCURATE;
ret |= DRM_SCANOUTPOS_VALID;
break;
@@ -105,10 +119,11 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
if (retry) ndelay(crtc->linedur_ns);
} while (retry--);
- *hpos = args.hline;
- *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
- if (stime) *stime = ns_to_ktime(args.time[0]);
- if (etime) *etime = ns_to_ktime(args.time[1]);
+ *hpos = args.scan.hline;
+ *vpos = calc(args.scan.vblanks, args.scan.vblanke,
+ args.scan.vtotal, args.scan.vline);
+ if (stime) *stime = ns_to_ktime(args.scan.time[0]);
+ if (etime) *etime = ns_to_ktime(args.scan.time[1]);
if (*vpos < 0)
ret |= DRM_SCANOUTPOS_INVBL;
@@ -151,16 +166,13 @@ nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error,
static void
nouveau_display_vblank_fini(struct drm_device *dev)
{
- struct nouveau_display *disp = nouveau_display(dev);
- int i;
+ struct drm_crtc *crtc;
drm_vblank_cleanup(dev);
- if (disp->vblank) {
- for (i = 0; i < dev->mode_config.num_crtc; i++)
- nouveau_event_ref(NULL, &disp->vblank[i]);
- kfree(disp->vblank);
- disp->vblank = NULL;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nvif_notify_fini(&nv_crtc->vblank);
}
}
@@ -168,19 +180,20 @@ static int
nouveau_display_vblank_init(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
- int ret, i;
-
- disp->vblank = kzalloc(dev->mode_config.num_crtc *
- sizeof(*disp->vblank), GFP_KERNEL);
- if (!disp->vblank)
- return -ENOMEM;
+ struct drm_crtc *crtc;
+ int ret;
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- ret = nouveau_event_new(pdisp->vblank, 1, i,
- nouveau_display_vblank_handler,
- drm, &disp->vblank[i]);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ ret = nvif_notify_init(&disp->disp, NULL,
+ nouveau_display_vblank_handler, false,
+ NV04_DISP_NTFY_VBLANK,
+ &(struct nvif_notify_head_req_v0) {
+ .head = nv_crtc->index,
+ },
+ sizeof(struct nvif_notify_head_req_v0),
+ sizeof(struct nvif_notify_head_rep_v0),
+ &nv_crtc->vblank);
if (ret) {
nouveau_display_vblank_fini(dev);
return ret;
@@ -200,6 +213,10 @@ static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct nouveau_display *disp = nouveau_display(drm_fb->dev);
+
+ if (disp->fb_dtor)
+ disp->fb_dtor(drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
@@ -229,63 +246,24 @@ nouveau_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct nouveau_bo *nvbo)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_display *disp = nouveau_display(dev);
struct drm_framebuffer *fb = &nv_fb->base;
int ret;
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
nv_fb->nvbo = nvbo;
- if (nv_device(drm->device)->card_type >= NV_50) {
- u32 tile_flags = nouveau_bo_tile_layout(nvbo);
- if (tile_flags == 0x7a00 ||
- tile_flags == 0xfe00)
- nv_fb->r_dma = NvEvoFB32;
- else
- if (tile_flags == 0x7000)
- nv_fb->r_dma = NvEvoFB16;
- else
- nv_fb->r_dma = NvEvoVRAM_LP;
-
- switch (fb->depth) {
- case 8: nv_fb->r_format = 0x1e00; break;
- case 15: nv_fb->r_format = 0xe900; break;
- case 16: nv_fb->r_format = 0xe800; break;
- case 24:
- case 32: nv_fb->r_format = 0xcf00; break;
- case 30: nv_fb->r_format = 0xd100; break;
- default:
- NV_ERROR(drm, "unknown depth %d\n", fb->depth);
- return -EINVAL;
- }
-
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
- NV_ERROR(drm, "framebuffer requires contiguous bo\n");
- return -EINVAL;
- }
-
- if (nv_device(drm->device)->chipset == 0x50)
- nv_fb->r_format |= (tile_flags << 8);
-
- if (!tile_flags) {
- if (nv_device(drm->device)->card_type < NV_D0)
- nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
- else
- nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
- } else {
- u32 mode = nvbo->tile_mode;
- if (nv_device(drm->device)->card_type >= NV_C0)
- mode >>= 4;
- nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
- }
- }
-
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret) {
+ if (ret)
return ret;
+
+ if (disp->fb_ctor) {
+ ret = disp->fb_ctor(fb);
+ if (ret)
+ disp->fb_dtor(fb);
}
- return 0;
+ return ret;
}
static struct drm_framebuffer *
@@ -393,7 +371,7 @@ nouveau_display_init(struct drm_device *dev)
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (conn->hpd) nouveau_event_get(conn->hpd);
+ nvif_notify_get(&conn->hpd);
}
return ret;
@@ -404,37 +382,32 @@ nouveau_display_fini(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
struct drm_connector *connector;
+ int head;
+
+ /* Make sure that drm and hw vblank irqs get properly disabled. */
+ for (head = 0; head < dev->mode_config.num_crtc; head++)
+ drm_vblank_off(dev, head);
/* disable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (conn->hpd) nouveau_event_put(conn->hpd);
+ nvif_notify_put(&conn->hpd);
}
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
}
-int
-nouveau_display_create(struct drm_device *dev)
+static void
+nouveau_display_create_properties(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_display *disp;
- int ret, gen;
-
- disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
- if (!disp)
- return -ENOMEM;
-
- drm_mode_config_init(dev);
- drm_mode_create_scaling_mode_property(dev);
- drm_mode_create_dvi_i_properties(dev);
+ struct nouveau_display *disp = nouveau_display(dev);
+ int gen;
- if (nv_device(drm->device)->card_type < NV_50)
+ if (disp->disp.oclass < NV50_DISP)
gen = 0;
else
- if (nv_device(drm->device)->card_type < NV_D0)
+ if (disp->disp.oclass < GF110_DISP)
gen = 1;
else
gen = 2;
@@ -449,26 +422,43 @@ nouveau_display_create(struct drm_device *dev)
disp->underscan_vborder_property =
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
- if (gen >= 1) {
- /* -90..+90 */
- disp->vibrant_hue_property =
- drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+ if (gen < 1)
+ return;
- /* -100..+100 */
- disp->color_vibrance_property =
- drm_property_create_range(dev, 0, "color vibrance", 0, 200);
- }
+ /* -90..+90 */
+ disp->vibrant_hue_property =
+ drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+
+ /* -100..+100 */
+ disp->color_vibrance_property =
+ drm_property_create_range(dev, 0, "color vibrance", 0, 200);
+}
+
+int
+nouveau_display_create(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_display *disp;
+ int ret;
+
+ disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
+
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dvi_i_properties(dev);
dev->mode_config.funcs = &nouveau_mode_config_funcs;
- dev->mode_config.fb_base = nv_device_resource_start(device, 1);
+ dev->mode_config.fb_base = nv_device_resource_start(nvkm_device(&drm->device), 1);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- if (nv_device(drm->device)->card_type < NV_10) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
} else
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
@@ -479,7 +469,7 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- if (nv_device(drm->device)->chipset < 0x11)
+ if (drm->device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
else
dev->mode_config.async_page_flip = true;
@@ -487,29 +477,30 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (drm->vbios.dcb.entries) {
+ if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
- GM107_DISP_CLASS,
- NVF0_DISP_CLASS,
- NVE0_DISP_CLASS,
- NVD0_DISP_CLASS,
- NVA3_DISP_CLASS,
- NV94_DISP_CLASS,
- NVA0_DISP_CLASS,
- NV84_DISP_CLASS,
- NV50_DISP_CLASS,
- NV04_DISP_CLASS,
+ GM107_DISP,
+ GK110_DISP,
+ GK104_DISP,
+ GF110_DISP,
+ GT214_DISP,
+ GT206_DISP,
+ GT200_DISP,
+ G82_DISP,
+ NV50_DISP,
+ NV04_DISP,
};
int i;
for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
- ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
- NVDRM_DISPLAY, oclass[i],
- NULL, 0, &disp->core);
+ ret = nvif_object_init(nvif_object(&drm->device), NULL,
+ NVDRM_DISPLAY, oclass[i],
+ NULL, 0, &disp->disp);
}
if (ret == 0) {
- if (nv_mclass(disp->core) < NV50_DISP_CLASS)
+ nouveau_display_create_properties(dev);
+ if (disp->disp.oclass < NV50_DISP)
ret = nv04_display_create(dev);
else
ret = nv50_display_create(dev);
@@ -542,7 +533,6 @@ void
nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
@@ -553,7 +543,7 @@ nouveau_display_destroy(struct drm_device *dev)
if (disp->dtor)
disp->dtor(dev);
- nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY);
+ nvif_object_fini(&disp->disp);
nouveau_drm(dev)->display = NULL;
kfree(disp);
@@ -620,6 +610,8 @@ void
nouveau_display_resume(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ int head;
+
nouveau_display_init(dev);
/* Force CLUT to get re-loaded during modeset */
@@ -629,6 +621,10 @@ nouveau_display_resume(struct drm_device *dev)
nv_crtc->lut.depth = 0;
}
+ /* Make sure that drm and hw vblank irqs get resumed if needed. */
+ for (head = 0; head < dev->mode_config.num_crtc; head++)
+ drm_vblank_on(dev, head);
+
drm_helper_resume_force_mode(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -669,7 +665,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- if (nv_device(drm->device)->card_type < NV_C0)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
else
BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
@@ -698,12 +694,15 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan = drm->channel;
+ struct nouveau_channel *chan;
+ struct nouveau_cli *cli;
struct nouveau_fence *fence;
int ret;
- if (!drm->channel)
+ chan = drm->channel;
+ if (!chan)
return -ENODEV;
+ cli = (void *)nvif_client(&chan->device->base);
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -715,7 +714,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
goto fail_free;
}
- mutex_lock(&chan->cli->mutex);
+ mutex_lock(&cli->mutex);
/* synchronise rendering channel with the kernel's channel */
spin_lock(&new_bo->bo.bdev->fence_lock);
@@ -740,7 +739,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
drm_vblank_get(dev, nouveau_crtc(crtc)->index);
/* Emit a page flip */
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
if (ret)
goto fail_unreserve;
@@ -769,7 +768,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
goto fail_unreserve;
- mutex_unlock(&chan->cli->mutex);
+ mutex_unlock(&cli->mutex);
/* Update the crtc struct and cleanup */
crtc->primary->fb = fb;
@@ -785,7 +784,7 @@ fail_unreserve:
drm_vblank_put(dev, nouveau_crtc(crtc)->index);
ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
- mutex_unlock(&chan->cli->mutex);
+ mutex_unlock(&cli->mutex);
if (old_bo != new_bo)
nouveau_bo_unpin(new_bo);
fail_free:
@@ -815,7 +814,7 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
/* Vblank timestamps/counts are only correct on >= NV-50 */
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
crtcid = s->crtc;
drm_send_vblank_event(dev, crtcid, s->event);
@@ -841,7 +840,7 @@ nouveau_flip_complete(void *data)
struct nouveau_page_flip_state state;
if (!nouveau_finish_page_flip(chan, &state)) {
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
nv_set_crtc_base(drm->dev, state.crtc, state.offset +
state.y * state.pitch +
state.x * state.bpp / 8);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index a71cf77e55b2..88ca177cb1c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -9,9 +9,11 @@ struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
struct nouveau_vma vma;
- u32 r_dma;
+ u32 r_handle;
u32 r_format;
u32 r_pitch;
+ struct nvif_object h_base[4];
+ struct nvif_object h_core;
};
static inline struct nouveau_framebuffer *
@@ -36,8 +38,10 @@ struct nouveau_display {
int (*init)(struct drm_device *);
void (*fini)(struct drm_device *);
- struct nouveau_object *core;
- struct nouveau_eventh **vblank;
+ int (*fb_ctor)(struct drm_framebuffer *);
+ void (*fb_dtor)(struct drm_framebuffer *);
+
+ struct nvif_object disp;
struct drm_property *dithering_mode;
struct drm_property *dithering_depth;
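
With the per-chipset framebuffer setup moved out of nouveau_framebuffer_init(), struct nouveau_display now carries optional fb_ctor/fb_dtor hooks: the ctor runs after drm_framebuffer_init(), the dtor unwinds a failed ctor and is also called from nouveau_user_framebuffer_destroy(). A self-contained sketch of that optional-hook pattern is below; the display_ops/fb types and my_ctor/my_dtor helpers are illustrative stand-ins, not nouveau types.

/* Illustrative sketch of the optional ctor/dtor hook pattern; the names
 * here are stand-ins for the fb_ctor/fb_dtor hooks added above. */
#include <stdio.h>

struct fb { int id; };

struct display_ops {
	int  (*fb_ctor)(struct fb *);    /* optional: may be NULL */
	void (*fb_dtor)(struct fb *);
};

static int framebuffer_init(const struct display_ops *ops, struct fb *fb)
{
	int ret = 0;

	if (ops->fb_ctor) {
		ret = ops->fb_ctor(fb);
		if (ret)
			ops->fb_dtor(fb);   /* unwind partial construction */
	}
	return ret;
}

static int my_ctor(struct fb *fb) { printf("ctor %d\n", fb->id); return 0; }
static void my_dtor(struct fb *fb) { printf("dtor %d\n", fb->id); }

int main(void)
{
	struct display_ops ops = { .fb_ctor = my_ctor, .fb_dtor = my_dtor };
	struct fb fb = { .id = 1 };
	return framebuffer_init(&ops, &fb);
}
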
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index c177272152e2..8508603cc8c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,8 +24,6 @@
*
*/
-#include <core/client.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -54,9 +52,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
uint64_t val;
- val = nv_ro32(chan->object, chan->user_get);
+ val = nvif_rd32(chan, chan->user_get);
if (chan->user_get_hi)
- val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;
+ val |= (uint64_t)nvif_rd32(chan, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -84,12 +82,13 @@ void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
int delta, int length)
{
+ struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
struct nouveau_bo *pb = chan->push.buffer;
struct nouveau_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
u64 offset;
- vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
+ vma = nouveau_bo_vma_find(bo, cli->vm);
BUG_ON(!vma);
offset = vma->offset + delta;
@@ -104,7 +103,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
/* Flush writes. */
nouveau_bo_rd32(pb, 0);
- nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
+ nvif_wr32(chan, 0x8c, chan->dma.ib_put);
chan->dma.ib_free--;
}
@@ -114,7 +113,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
uint32_t cnt = 0, prev_get = 0;
while (chan->dma.ib_free < count) {
- uint32_t get = nv_ro32(chan->object, 0x88);
+ uint32_t get = nvif_rd32(chan, 0x88);
if (get != prev_get) {
prev_get = get;
cnt = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index dc0e0c5cadb4..8da0a272c45a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -58,31 +58,14 @@ enum {
FermiSw = 5, /* DO NOT CHANGE (well.. 6/7 will work...) */
};
-/* Object handles. */
+/* Object handles - for stuff that doesn't use handle == oclass. */
enum {
- NvM2MF = 0x80000001,
NvDmaFB = 0x80000002,
NvDmaTT = 0x80000003,
NvNotify0 = 0x80000006,
- Nv2D = 0x80000007,
- NvCtxSurf2D = 0x80000008,
- NvRop = 0x80000009,
- NvImagePatt = 0x8000000a,
- NvClipRect = 0x8000000b,
- NvGdiRect = 0x8000000c,
- NvImageBlit = 0x8000000d,
- NvSw = 0x8000000e,
NvSema = 0x8000000f,
NvEvoSema0 = 0x80000010,
NvEvoSema1 = 0x80000011,
- NvNotify1 = 0x80000012,
-
- /* G80+ display objects */
- NvEvoVRAM = 0x01000000,
- NvEvoFB16 = 0x01000001,
- NvEvoFB32 = 0x01000002,
- NvEvoVRAM_LP = 0x01000003,
- NvEvoSync = 0xcafe0000
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
@@ -157,7 +140,7 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
#define WRITE_PUT(val) do { \
mb(); \
nouveau_bo_rd32(chan->push.buffer, 0); \
- nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
+ nvif_wr32(chan, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
} while (0)
static inline void
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 5675ffc175ae..c5137cccce7d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,11 +30,6 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
-#include <core/class.h>
-
-#include <subdev/gpio.h>
-#include <subdev/i2c.h>
-
static void
nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
u8 *dpcd)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c9428c943afb..250a5e88c751 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -27,21 +27,14 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
+
#include "drmP.h"
#include "drm_crtc_helper.h"
+
#include <core/device.h>
-#include <core/client.h>
#include <core/gpuobj.h>
-#include <core/class.h>
#include <core/option.h>
-#include <engine/device.h>
-#include <engine/disp.h>
-#include <engine/fifo.h>
-#include <engine/software.h>
-
-#include <subdev/vm.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_ttm.h"
@@ -57,6 +50,7 @@
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
#include "nouveau_debugfs.h"
+#include "nouveau_usif.h"
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -109,40 +103,37 @@ static int
nouveau_cli_create(u64 name, const char *sname,
int size, void **pcli)
{
- struct nouveau_cli *cli;
- int ret;
-
- *pcli = NULL;
- ret = nouveau_client_create_(sname, name, nouveau_config,
- nouveau_debug, size, pcli);
- cli = *pcli;
- if (ret) {
- if (cli)
- nouveau_client_destroy(&cli->base);
- *pcli = NULL;
+ struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
+ if (cli) {
+ int ret = nvif_client_init(NULL, NULL, sname, name,
+ nouveau_config, nouveau_debug,
+ &cli->base);
+ if (ret == 0) {
+ mutex_init(&cli->mutex);
+ usif_client_init(cli);
+ }
return ret;
}
-
- mutex_init(&cli->mutex);
- return 0;
+ return -ENOMEM;
}
static void
nouveau_cli_destroy(struct nouveau_cli *cli)
{
- struct nouveau_object *client = nv_object(cli);
- nouveau_vm_ref(NULL, &cli->base.vm, NULL);
- nouveau_client_fini(&cli->base, false);
- atomic_set(&client->refcount, 1);
- nouveau_object_ref(NULL, &client);
+ nouveau_vm_ref(NULL, &nvkm_client(&cli->base)->vm, NULL);
+ nvif_client_fini(&cli->base);
+ usif_client_fini(cli);
}
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
- nouveau_gpuobj_ref(NULL, &drm->notify);
nouveau_channel_del(&drm->channel);
+ nvif_object_fini(&drm->ntfy);
+ nouveau_gpuobj_ref(NULL, &drm->notify);
+ nvif_object_fini(&drm->nvsw);
nouveau_channel_del(&drm->cechan);
+ nvif_object_fini(&drm->ttm.copy);
if (drm->fence)
nouveau_fence(drm)->dtor(drm);
}
@@ -150,46 +141,71 @@ nouveau_accel_fini(struct nouveau_drm *drm)
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_object *object;
+ struct nvif_device *device = &drm->device;
u32 arg0, arg1;
- int ret;
+ u32 sclass[16];
+ int ret, i;
- if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
+ if (nouveau_noaccel)
return;
/* initialise synchronisation routines */
- if (device->card_type < NV_10) ret = nv04_fence_create(drm);
- else if (device->card_type < NV_11 ||
- device->chipset < 0x17) ret = nv10_fence_create(drm);
- else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
- else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
- else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
- else ret = nvc0_fence_create(drm);
+ /*XXX: this is crap, but the fence/channel stuff is a little
+ * backwards in some places. this will be fixed.
+ */
+ ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass));
+ if (ret < 0)
+ return;
+
+ for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) {
+ switch (sclass[i]) {
+ case NV03_CHANNEL_DMA:
+ ret = nv04_fence_create(drm);
+ break;
+ case NV10_CHANNEL_DMA:
+ ret = nv10_fence_create(drm);
+ break;
+ case NV17_CHANNEL_DMA:
+ case NV40_CHANNEL_DMA:
+ ret = nv17_fence_create(drm);
+ break;
+ case NV50_CHANNEL_GPFIFO:
+ ret = nv50_fence_create(drm);
+ break;
+ case G82_CHANNEL_GPFIFO:
+ ret = nv84_fence_create(drm);
+ break;
+ case FERMI_CHANNEL_GPFIFO:
+ case KEPLER_CHANNEL_GPFIFO_A:
+ ret = nvc0_fence_create(drm);
+ break;
+ default:
+ break;
+ }
+ }
+
if (ret) {
NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
nouveau_accel_fini(drm);
return;
}
- if (device->card_type >= NV_E0) {
- ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
- NVDRM_CHAN + 1,
- NVE0_CHANNEL_IND_ENGINE_CE0 |
- NVE0_CHANNEL_IND_ENGINE_CE1, 0,
- &drm->cechan);
+ if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
+ ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
+ KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0|
+ KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
+ 0, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
- arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
+ arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
arg1 = 1;
} else
- if (device->chipset >= 0xa3 &&
- device->chipset != 0xaa &&
- device->chipset != 0xac) {
- ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
- NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
- &drm->cechan);
+ if (device->info.chipset >= 0xa3 &&
+ device->info.chipset != 0xaa &&
+ device->info.chipset != 0xac) {
+ ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
+ NvDmaFB, NvDmaTT, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -200,30 +216,30 @@ nouveau_accel_init(struct nouveau_drm *drm)
arg1 = NvDmaTT;
}
- ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
- arg0, arg1, &drm->channel);
+ ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1,
+ &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_fini(drm);
return;
}
- ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
- nouveau_abi16_swclass(drm), NULL, 0, &object);
+ ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
+ nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
if (ret == 0) {
- struct nouveau_software_chan *swch = (void *)object->parent;
+ struct nouveau_software_chan *swch;
ret = RING_SPACE(drm->channel, 2);
if (ret == 0) {
- if (device->card_type < NV_C0) {
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
OUT_RING (drm->channel, NVDRM_NVSW);
} else
- if (device->card_type < NV_E0) {
+ if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
OUT_RING (drm->channel, 0x001f0000);
}
}
- swch = (void *)object->parent;
+ swch = (void *)nvkm_object(&drm->nvsw)->parent;
swch->flip = nouveau_flip_complete;
swch->flip_data = drm->channel;
}
@@ -234,24 +250,24 @@ nouveau_accel_init(struct nouveau_drm *drm)
return;
}
- if (device->card_type < NV_C0) {
- ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
- &drm->notify);
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
+ ret = nouveau_gpuobj_new(nvkm_object(&drm->device), NULL, 32,
+ 0, 0, &drm->notify);
if (ret) {
NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
nouveau_accel_fini(drm);
return;
}
- ret = nouveau_object_new(nv_object(drm),
- drm->channel->handle, NvNotify0,
- 0x003d, &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = drm->notify->addr,
.limit = drm->notify->addr + 31
- }, sizeof(struct nv_dma_class),
- &object);
+ }, sizeof(struct nv_dma_v0),
+ &drm->ntfy);
if (ret) {
nouveau_accel_fini(drm);
return;
@@ -294,7 +310,8 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
#ifdef CONFIG_X86
boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+ if (nouveau_modeset != 2)
+ remove_conflicting_framebuffers(aper, "nouveaufb", boot);
kfree(aper);
ret = nouveau_device_create(pdev, NOUVEAU_BUS_PCI,
@@ -348,7 +365,6 @@ static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
struct pci_dev *pdev = dev->pdev;
- struct nouveau_device *device;
struct nouveau_drm *drm;
int ret;
@@ -359,7 +375,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
- nouveau_client(drm)->debug = nouveau_dbgopt(nouveau_debug, "DRM");
+ nvkm_client(&drm->client.base)->debug =
+ nouveau_dbgopt(nouveau_debug, "DRM");
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
@@ -370,33 +387,34 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
* (possibly) execute vbios init tables (see nouveau_agp.h)
*/
if (pdev && drm_pci_device_is_agp(dev) && dev->agp) {
+ const u64 enables = NV_DEVICE_V0_DISABLE_IDENTIFY |
+ NV_DEVICE_V0_DISABLE_MMIO;
/* dummy device object, doesn't init anything, but allows
* agp code access to registers
*/
- ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT,
- NVDRM_DEVICE, 0x0080,
- &(struct nv_device_class) {
+ ret = nvif_device_init(&drm->client.base.base, NULL,
+ NVDRM_DEVICE, NV_DEVICE,
+ &(struct nv_device_v0) {
.device = ~0,
- .disable =
- ~(NV_DEVICE_DISABLE_MMIO |
- NV_DEVICE_DISABLE_IDENTIFY),
+ .disable = ~enables,
.debug0 = ~0,
- }, sizeof(struct nv_device_class),
- &drm->device);
+ }, sizeof(struct nv_device_v0),
+ &drm->device);
if (ret)
goto fail_device;
nouveau_agp_reset(drm);
- nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE);
+ nvif_device_fini(&drm->device);
}
- ret = nouveau_object_new(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE,
- 0x0080, &(struct nv_device_class) {
+ ret = nvif_device_init(&drm->client.base.base, NULL, NVDRM_DEVICE,
+ NV_DEVICE,
+ &(struct nv_device_v0) {
.device = ~0,
.disable = 0,
.debug0 = 0,
- }, sizeof(struct nv_device_class),
- &drm->device);
+ }, sizeof(struct nv_device_v0),
+ &drm->device);
if (ret)
goto fail_device;
@@ -406,18 +424,19 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
*/
- device = nv_device(drm->device);
- if (nv_device(drm->device)->chipset == 0xc1)
- nv_mask(device, 0x00088080, 0x00000800, 0x00000000);
+ if (drm->device.info.chipset == 0xc1)
+ nvif_mask(&drm->device, 0x00088080, 0x00000800, 0x00000000);
nouveau_vga_init(drm);
nouveau_agp_init(drm);
- if (device->card_type >= NV_50) {
- ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
- 0x1000, &drm->client.base.vm);
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40),
+ 0x1000, &drm->client.vm);
if (ret)
goto fail_device;
+
+ nvkm_client(&drm->client.base)->vm = drm->client.vm;
}
ret = nouveau_ttm_init(drm);
@@ -463,6 +482,7 @@ fail_ttm:
nouveau_agp_fini(drm);
nouveau_vga_fini(drm);
fail_device:
+ nvif_device_fini(&drm->device);
nouveau_cli_destroy(&drm->client);
return ret;
}
@@ -488,26 +508,37 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_agp_fini(drm);
nouveau_vga_fini(drm);
+ nvif_device_fini(&drm->device);
if (drm->hdmi_device)
pci_dev_put(drm->hdmi_device);
nouveau_cli_destroy(&drm->client);
return 0;
}
-static void
-nouveau_drm_remove(struct pci_dev *pdev)
+void
+nouveau_drm_device_remove(struct drm_device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_client *client;
struct nouveau_object *device;
dev->irq_enabled = false;
- device = drm->client.base.device;
+ client = nvkm_client(&drm->client.base);
+ device = client->device;
drm_put_dev(dev);
nouveau_object_ref(NULL, &device);
nouveau_object_debug();
}
+EXPORT_SYMBOL(nouveau_drm_device_remove);
+
+static void
+nouveau_drm_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ nouveau_drm_device_remove(dev);
+}
static int
nouveau_do_suspend(struct drm_device *dev, bool runtime)
@@ -548,13 +579,13 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
list_for_each_entry(cli, &drm->clients, head) {
- ret = nouveau_client_fini(&cli->base, true);
+ ret = nvif_client_suspend(&cli->base);
if (ret)
goto fail_client;
}
NV_INFO(drm, "suspending kernel object tree...\n");
- ret = nouveau_client_fini(&drm->client.base, true);
+ ret = nvif_client_suspend(&drm->client.base);
if (ret)
goto fail_client;
@@ -563,7 +594,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
fail_client:
list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
- nouveau_client_init(&cli->base);
+ nvif_client_resume(&cli->base);
}
if (drm->fence && nouveau_fence(drm)->resume)
@@ -611,7 +642,7 @@ nouveau_do_resume(struct drm_device *dev)
nouveau_agp_reset(drm);
NV_INFO(drm, "resuming kernel object tree...\n");
- nouveau_client_init(&drm->client.base);
+ nvif_client_resume(&drm->client.base);
nouveau_agp_init(drm);
NV_INFO(drm, "resuming client object trees...\n");
@@ -619,7 +650,7 @@ nouveau_do_resume(struct drm_device *dev)
nouveau_fence(drm)->resume(drm);
list_for_each_entry(cli, &drm->clients, head) {
- nouveau_client_init(&cli->base);
+ nvif_client_resume(&cli->base);
}
nouveau_run_vbios_init(dev);
@@ -715,13 +746,17 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
if (ret)
goto out_suspend;
- if (nv_device(drm->device)->card_type >= NV_50) {
- ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
- 0x1000, &cli->base.vm);
+ cli->base.super = false;
+
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_vm_new(nvkm_device(&drm->device), 0, (1ULL << 40),
+ 0x1000, &cli->vm);
if (ret) {
nouveau_cli_destroy(cli);
goto out_suspend;
}
+
+ nvkm_client(&cli->base)->vm = cli->vm;
}
fpriv->driver_priv = cli;
@@ -779,24 +814,31 @@ nouveau_ioctls[] = {
DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
-long nouveau_drm_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
+long
+nouveau_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev;
+ struct drm_file *filp = file->private_data;
+ struct drm_device *dev = filp->minor->dev;
long ret;
- dev = file_priv->minor->dev;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return ret;
- ret = drm_ioctl(filp, cmd, arg);
+ switch (_IOC_NR(cmd) - DRM_COMMAND_BASE) {
+ case DRM_NOUVEAU_NVIF:
+ ret = usif_ioctl(filp, (void __user *)arg, _IOC_SIZE(cmd));
+ break;
+ default:
+ ret = drm_ioctl(file, cmd, arg);
+ break;
+ }
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
+
static const struct file_operations
nouveau_driver_fops = {
.owner = THIS_MODULE,
@@ -921,7 +963,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_device *device = nouveau_dev(drm_dev);
+ struct nvif_device *device = &nouveau_drm(drm_dev)->device;
int ret;
if (nouveau_runtime_pm == 0)
@@ -937,7 +979,7 @@ static int nouveau_pmops_runtime_resume(struct device *dev)
ret = nouveau_do_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
/* do magic */
- nv_mask(device, 0x88488, (1 << 25), (1 << 25));
+ nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
nv_debug_level(NORMAL);
@@ -1005,24 +1047,41 @@ nouveau_drm_pci_driver = {
.driver.pm = &nouveau_pm_ops,
};
-int nouveau_drm_platform_probe(struct platform_device *pdev)
+struct drm_device *
+nouveau_platform_device_create_(struct platform_device *pdev, int size,
+ void **pobject)
{
- struct nouveau_device *device;
- int ret;
+ struct drm_device *drm;
+ int err;
- ret = nouveau_device_create(pdev, NOUVEAU_BUS_PLATFORM,
+ err = nouveau_device_create_(pdev, NOUVEAU_BUS_PLATFORM,
nouveau_platform_name(pdev),
dev_name(&pdev->dev), nouveau_config,
- nouveau_debug, &device);
-
- ret = drm_platform_init(&driver, pdev);
- if (ret) {
- nouveau_object_ref(NULL, (struct nouveau_object **)&device);
- return ret;
+ nouveau_debug, size, pobject);
+ if (err)
+ return ERR_PTR(err);
+
+ drm = drm_dev_alloc(&driver, &pdev->dev);
+ if (!drm) {
+ err = -ENOMEM;
+ goto err_free;
}
- return ret;
+ err = drm_dev_set_unique(drm, "%s", dev_name(&pdev->dev));
+ if (err < 0)
+ goto err_free;
+
+ drm->platformdev = pdev;
+ platform_set_drvdata(pdev, drm);
+
+ return drm;
+
+err_free:
+ nouveau_object_ref(NULL, (struct nouveau_object **)pobject);
+
+ return ERR_PTR(err);
}
+EXPORT_SYMBOL(nouveau_platform_device_create_);
static int __init
nouveau_drm_init(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 7efbafaf7c1d..b02b02452c85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -9,8 +9,8 @@
#define DRIVER_DATE "20120801"
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 1
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_MINOR 2
+#define DRIVER_PATCHLEVEL 0
/*
* 1.1.1:
@@ -21,15 +21,17 @@
* to control registers on the MPs to enable performance counters,
* and to control the warp error enable mask (OpenGL requires out of
* bounds access to local memory to be silently ignored / return 0).
+ * 1.1.2:
+ * - fixes multiple bugs in flip completion events and timestamping
+ * 1.2.0:
+ * - object api exposed to userspace
+ * - fermi,kepler,maxwell zbc
*/
-#include <core/client.h>
-#include <core/event.h>
-
-#include <subdev/vm.h>
+#include <nvif/client.h>
+#include <nvif/device.h>
#include <drmP.h>
-#include <drm/nouveau_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -38,7 +40,10 @@
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
+#include "uapi/drm/nouveau_drm.h"
+
struct nouveau_channel;
+struct platform_device;
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
@@ -50,6 +55,17 @@ struct nouveau_drm_tile {
bool used;
};
+enum nouveau_drm_object_route {
+ NVDRM_OBJECT_NVIF = 0,
+ NVDRM_OBJECT_USIF,
+ NVDRM_OBJECT_ABI16,
+};
+
+enum nouveau_drm_notify_route {
+ NVDRM_NOTIFY_NVIF = 0,
+ NVDRM_NOTIFY_USIF
+};
+
enum nouveau_drm_handle {
NVDRM_CLIENT = 0xffffffff,
NVDRM_DEVICE = 0xdddddddd,
@@ -61,10 +77,13 @@ enum nouveau_drm_handle {
};
struct nouveau_cli {
- struct nouveau_client base;
+ struct nvif_client base;
+ struct nouveau_vm *vm; /*XXX*/
struct list_head head;
struct mutex mutex;
void *abi16;
+ struct list_head objects;
+ struct list_head notifys;
};
static inline struct nouveau_cli *
@@ -73,13 +92,16 @@ nouveau_cli(struct drm_file *fpriv)
return fpriv ? fpriv->driver_priv : NULL;
}
+#include <nvif/object.h>
+#include <nvif/device.h>
+
extern int nouveau_runtime_pm;
struct nouveau_drm {
struct nouveau_cli client;
struct drm_device *dev;
- struct nouveau_object *device;
+ struct nvif_device device;
struct list_head clients;
struct {
@@ -102,6 +124,7 @@ struct nouveau_drm {
struct ttm_buffer_object *,
struct ttm_mem_reg *, struct ttm_mem_reg *);
struct nouveau_channel *chan;
+ struct nvif_object copy;
int mtrr;
} ttm;
@@ -119,6 +142,8 @@ struct nouveau_drm {
struct nouveau_channel *channel;
struct nouveau_gpuobj *notify;
struct nouveau_fbdev *fbcon;
+ struct nvif_object nvsw;
+ struct nvif_object ntfy;
/* nv10-nv40 tiling regions */
struct {
@@ -148,20 +173,25 @@ nouveau_drm(struct drm_device *dev)
return dev->dev_private;
}
-static inline struct nouveau_device *
-nouveau_dev(struct drm_device *dev)
-{
- return nv_device(nouveau_drm(dev)->device);
-}
-
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
-#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
-#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
-#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
-#define NV_INFO(cli, fmt, args...) nv_info((cli), fmt, ##args)
-#define NV_DEBUG(cli, fmt, args...) nv_debug((cli), fmt, ##args)
+#define nouveau_platform_device_create(p, u) \
+ nouveau_platform_device_create_(p, sizeof(**u), (void **)u)
+struct drm_device *
+nouveau_platform_device_create_(struct platform_device *pdev,
+ int size, void **pobject);
+void nouveau_drm_device_remove(struct drm_device *dev);
+
+#define NV_PRINTK(l,c,f,a...) do { \
+ struct nouveau_cli *_cli = (c); \
+ nv_##l(_cli->base.base.priv, f, ##a); \
+} while(0)
+#define NV_FATAL(drm,f,a...) NV_PRINTK(fatal, &(drm)->client, f, ##a)
+#define NV_ERROR(drm,f,a...) NV_PRINTK(error, &(drm)->client, f, ##a)
+#define NV_WARN(drm,f,a...) NV_PRINTK(warn, &(drm)->client, f, ##a)
+#define NV_INFO(drm,f,a...) NV_PRINTK(info, &(drm)->client, f, ##a)
+#define NV_DEBUG(drm,f,a...) NV_PRINTK(debug, &(drm)->client, f, ##a)
extern int nouveau_modeset;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 191665ee7f52..ebfe3180109e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -51,11 +51,6 @@
#include "nouveau_crtc.h"
-#include <core/client.h>
-#include <core/device.h>
-
-#include <subdev/fb.h>
-
MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
static int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
@@ -65,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -74,10 +69,10 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
mutex_trylock(&drm->client.mutex)) {
- if (device->card_type < NV_50)
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
ret = nv04_fbcon_fillrect(info, rect);
else
- if (device->card_type < NV_C0)
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
ret = nv50_fbcon_fillrect(info, rect);
else
ret = nvc0_fbcon_fillrect(info, rect);
@@ -97,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -106,10 +101,10 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
mutex_trylock(&drm->client.mutex)) {
- if (device->card_type < NV_50)
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
ret = nv04_fbcon_copyarea(info, image);
else
- if (device->card_type < NV_C0)
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
ret = nv50_fbcon_copyarea(info, image);
else
ret = nvc0_fbcon_copyarea(info, image);
@@ -129,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *fbcon = info->par;
struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -138,10 +133,10 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
mutex_trylock(&drm->client.mutex)) {
- if (device->card_type < NV_50)
+ if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
ret = nv04_fbcon_imageblit(info, image);
else
- if (device->card_type < NV_C0)
+ if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
ret = nv50_fbcon_imageblit(info, image);
else
ret = nvc0_fbcon_imageblit(info, image);
@@ -212,6 +207,65 @@ static struct fb_ops nouveau_fbcon_sw_ops = {
.fb_debug_leave = drm_fb_helper_debug_leave,
};
+void
+nouveau_fbcon_accel_save_disable(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->fbcon) {
+ drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
+ drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ }
+}
+
+void
+nouveau_fbcon_accel_restore(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->fbcon) {
+ drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
+ }
+}
+
+void
+nouveau_fbcon_accel_fini(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon = drm->fbcon;
+ if (fbcon && drm->channel) {
+ console_lock();
+ fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ console_unlock();
+ nouveau_channel_idle(drm->channel);
+ nvif_object_fini(&fbcon->twod);
+ nvif_object_fini(&fbcon->blit);
+ nvif_object_fini(&fbcon->gdi);
+ nvif_object_fini(&fbcon->patt);
+ nvif_object_fini(&fbcon->rop);
+ nvif_object_fini(&fbcon->clip);
+ nvif_object_fini(&fbcon->surf2d);
+ }
+}
+
+void
+nouveau_fbcon_accel_init(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon = drm->fbcon;
+ struct fb_info *info = fbcon->helper.fbdev;
+ int ret;
+
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
+ ret = nv04_fbcon_accel_init(info);
+ else
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ ret = nv50_fbcon_accel_init(info);
+ else
+ ret = nvc0_fbcon_accel_init(info);
+
+ if (ret == 0)
+ info->fbops = &nouveau_fbcon_ops;
+}
+
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
@@ -257,7 +311,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
struct drm_device *dev = fbcon->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct fb_info *info;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
@@ -299,8 +353,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
}
chan = nouveau_nofbaccel ? NULL : drm->channel;
- if (chan && device->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm,
+ if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ ret = nouveau_bo_vma_add(nvbo, drm->client.vm,
&fbcon->nouveau_fb.vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
@@ -357,20 +411,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
mutex_unlock(&dev->struct_mutex);
- if (chan) {
- ret = -ENODEV;
- if (device->card_type < NV_50)
- ret = nv04_fbcon_accel_init(info);
- else
- if (device->card_type < NV_C0)
- ret = nv50_fbcon_accel_init(info);
- else
- ret = nvc0_fbcon_accel_init(info);
-
- if (ret == 0)
- info->fbops = &nouveau_fbcon_ops;
- }
-
+ if (chan)
+ nouveau_fbcon_accel_init(dev);
nouveau_fbcon_zfill(dev, fbcon);
/* To allow resizing without swapping buffers */
@@ -438,7 +480,7 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info)
info->flags |= FBINFO_HWACCEL_DISABLED;
}
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.gamma_set = nouveau_fbcon_gamma_set,
.gamma_get = nouveau_fbcon_gamma_get,
.fb_probe = nouveau_fbcon_create,
@@ -449,7 +491,6 @@ int
nouveau_fbcon_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct nouveau_fbdev *fbcon;
int preferred_bpp;
int ret;
@@ -464,7 +505,8 @@ nouveau_fbcon_init(struct drm_device *dev)
fbcon->dev = dev;
drm->fbcon = fbcon;
- fbcon->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+ drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
ret = drm_fb_helper_init(dev, &fbcon->helper,
dev->mode_config.num_crtc, 4);
@@ -475,10 +517,10 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_single_add_all_connectors(&fbcon->helper);
- if (pfb->ram->size <= 32 * 1024 * 1024)
+ if (drm->device.info.ram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
else
- if (pfb->ram->size <= 64 * 1024 * 1024)
+ if (drm->device.info.ram_size <= 64 * 1024 * 1024)
preferred_bpp = 16;
else
preferred_bpp = 32;
@@ -498,43 +540,25 @@ nouveau_fbcon_fini(struct drm_device *dev)
if (!drm->fbcon)
return;
+ nouveau_fbcon_accel_fini(dev);
nouveau_fbcon_destroy(dev, drm->fbcon);
kfree(drm->fbcon);
drm->fbcon = NULL;
}
void
-nouveau_fbcon_save_disable_accel(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
- drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
- }
-}
-
-void
-nouveau_fbcon_restore_accel(struct drm_device *dev)
-{
- struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
- }
-}
-
-void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
if (drm->fbcon) {
console_lock();
- if (state == 1)
- nouveau_fbcon_save_disable_accel(dev);
- fb_set_suspend(drm->fbcon->helper.fbdev, state);
if (state == 0) {
- nouveau_fbcon_restore_accel(dev);
+ nouveau_fbcon_accel_restore(dev);
nouveau_fbcon_zfill(dev, drm->fbcon);
}
+ fb_set_suspend(drm->fbcon->helper.fbdev, state);
+ if (state == 1)
+ nouveau_fbcon_accel_save_disable(dev);
console_unlock();
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index fcff797d2084..34658cfa8f5d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -37,6 +37,13 @@ struct nouveau_fbdev {
struct list_head fbdev_list;
struct drm_device *dev;
unsigned int saved_flags;
+ struct nvif_object surf2d;
+ struct nvif_object clip;
+ struct nvif_object rop;
+ struct nvif_object patt;
+ struct nvif_object gdi;
+ struct nvif_object blit;
+ struct nvif_object twod;
};
void nouveau_fbcon_restore(void);
@@ -61,8 +68,8 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
int nouveau_fbcon_init(struct drm_device *dev);
void nouveau_fbcon_fini(struct drm_device *dev);
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
-void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
-void nouveau_fbcon_restore_accel(struct drm_device *dev);
+void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
+void nouveau_fbcon_accel_restore(struct drm_device *dev);
void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab5ea3b0d666..0a93114158cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -29,12 +29,13 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
+#include <nvif/notify.h>
+#include <nvif/event.h>
+
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
-#include <engine/fifo.h>
-
struct fence_work {
struct work_struct base;
struct list_head head;
@@ -165,12 +166,18 @@ nouveau_fence_done(struct nouveau_fence *fence)
return !fence->channel;
}
+struct nouveau_fence_wait {
+ struct nouveau_fence_priv *priv;
+ struct nvif_notify notify;
+};
+
static int
-nouveau_fence_wait_uevent_handler(void *data, u32 type, int index)
+nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
- struct nouveau_fence_priv *priv = data;
- wake_up_all(&priv->waiting);
- return NVKM_EVENT_KEEP;
+ struct nouveau_fence_wait *wait =
+ container_of(notify, typeof(*wait), notify);
+ wake_up_all(&wait->priv->waiting);
+ return NVIF_NOTIFY_KEEP;
}
static int
@@ -178,18 +185,22 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
{
struct nouveau_channel *chan = fence->channel;
- struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
struct nouveau_fence_priv *priv = chan->drm->fence;
- struct nouveau_eventh *handler;
+ struct nouveau_fence_wait wait = { .priv = priv };
int ret = 0;
- ret = nouveau_event_new(pfifo->uevent, 1, 0,
- nouveau_fence_wait_uevent_handler,
- priv, &handler);
+ ret = nvif_notify_init(chan->object, NULL,
+ nouveau_fence_wait_uevent_handler, false,
+ G82_CHANNEL_DMA_V0_NTFY_UEVENT,
+ &(struct nvif_notify_uevent_req) {
+ },
+ sizeof(struct nvif_notify_uevent_req),
+ sizeof(struct nvif_notify_uevent_rep),
+ &wait.notify);
if (ret)
return ret;
- nouveau_event_get(handler);
+ nvif_notify_get(&wait.notify);
if (fence->timeout) {
unsigned long timeout = fence->timeout - jiffies;
@@ -221,7 +232,7 @@ nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
}
}
- nouveau_event_ref(NULL, &handler);
+ nvif_notify_fini(&wait.notify);
if (unlikely(ret < 0))
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c90c0dc0afe8..292a677bfed4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
*
*/
-#include <subdev/fb.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -58,14 +56,14 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_vma *vma;
int ret;
- if (!cli->base.vm)
+ if (!cli->vm)
return 0;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return ret;
- vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+ vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (!vma) {
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma) {
@@ -73,7 +71,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
- ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
+ ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
if (ret) {
kfree(vma);
goto out;
@@ -129,14 +127,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_vma *vma;
int ret;
- if (!cli->base.vm)
+ if (!cli->vm)
return;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return;
- vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+ vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (vma) {
if (--vma->refcount == 0)
nouveau_gem_object_unmap(nvbo, vma);
@@ -173,7 +171,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
/* Initialize the embedded gem-object. We return a single gem-reference
@@ -202,8 +200,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->bo.offset;
- if (cli->base.vm) {
- vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+ if (cli->vm) {
+ vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (!vma)
return -EINVAL;
@@ -223,13 +221,13 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
- NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
+ NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
@@ -350,7 +348,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
if (++trycnt > 100000) {
- NV_ERROR(cli, "%s failed and gave up.\n", __func__);
+ NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
@@ -361,7 +359,7 @@ retry:
gem = drm_gem_object_lookup(dev, file_priv, b->handle);
if (!gem) {
- NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
+ NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
ww_acquire_done(&op->ticket);
validate_fini(op, NULL);
return -ENOENT;
@@ -374,7 +372,7 @@ retry:
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
- NV_ERROR(cli, "multiple instances of buffer %d on "
+ NV_PRINTK(error, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_unreference_unlocked(gem);
ww_acquire_done(&op->ticket);
@@ -396,7 +394,7 @@ retry:
ww_acquire_fini(&op->ticket);
drm_gem_object_unreference_unlocked(gem);
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "fail reserve\n");
+ NV_PRINTK(error, cli, "fail reserve\n");
return ret;
}
}
@@ -414,7 +412,7 @@ retry:
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
list_add_tail(&nvbo->entry, &op->gart_list);
else {
- NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
+ NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
b->valid_domains);
list_add_tail(&nvbo->entry, &op->both_list);
ww_acquire_done(&op->ticket);
@@ -465,24 +463,24 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
- NV_ERROR(cli, "fail set_domain\n");
+ NV_PRINTK(error, cli, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "fail ttm_validate\n");
+ NV_PRINTK(error, cli, "fail ttm_validate\n");
return ret;
}
ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
- NV_ERROR(cli, "fail post-validate sync\n");
+ NV_PRINTK(error, cli, "fail post-validate sync\n");
return ret;
}
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -527,14 +525,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate_init\n");
+ NV_PRINTK(error, cli, "validate_init\n");
return ret;
}
ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate vram_list\n");
+ NV_PRINTK(error, cli, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -543,7 +541,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate gart_list\n");
+ NV_PRINTK(error, cli, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -552,7 +550,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate both_list\n");
+ NV_PRINTK(error, cli, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -613,7 +611,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
uint32_t data;
if (unlikely(r->bo_index > req->nr_buffers)) {
- NV_ERROR(cli, "reloc bo index invalid\n");
+ NV_PRINTK(error, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -623,7 +621,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
continue;
if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
- NV_ERROR(cli, "reloc container bo index invalid\n");
+ NV_PRINTK(error, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -631,7 +629,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
- NV_ERROR(cli, "reloc outside of bo\n");
+ NV_PRINTK(error, cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
@@ -640,7 +638,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
- NV_ERROR(cli, "failed kmap for reloc\n");
+ NV_PRINTK(error, cli, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
@@ -665,7 +663,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.bdev->fence_lock);
if (ret) {
- NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
+ NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
break;
}
@@ -696,7 +694,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return -ENOMEM;
list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
+ if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
chan = temp->chan;
break;
}
@@ -711,19 +709,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
- NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
+ NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
- NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
+ NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
- NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
+ NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return nouveau_abi16_put(abi16, -EINVAL);
}
@@ -741,7 +739,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
- NV_ERROR(cli, "push %d buffer not in list\n", i);
+ NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
@@ -752,7 +750,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate: %d\n", ret);
+ NV_PRINTK(error, cli, "validate: %d\n", ret);
goto out_prevalid;
}
@@ -760,7 +758,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (do_reloc) {
ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
if (ret) {
- NV_ERROR(cli, "reloc apply: %d\n", ret);
+ NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
goto out;
}
}
@@ -768,7 +766,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
- NV_ERROR(cli, "nv50cal_space: %d\n", ret);
+ NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
goto out;
}
@@ -780,10 +778,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
push[i].length);
}
} else
- if (nv_device(drm->device)->chipset >= 0x25) {
+ if (drm->device.info.chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
- NV_ERROR(cli, "cal_space: %d\n", ret);
+ NV_PRINTK(error, cli, "cal_space: %d\n", ret);
goto out;
}
@@ -797,7 +795,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
- NV_ERROR(cli, "jmp_space: %d\n", ret);
+ NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
goto out;
}
@@ -835,7 +833,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
ret = nouveau_fence_new(chan, false, &fence);
if (ret) {
- NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
+ NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
@@ -853,7 +851,7 @@ out_next:
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
- if (nv_device(drm->device)->chipset >= 0x25) {
+ if (drm->device.info.chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 19fd767bab10..afb36d66e78d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -34,17 +34,13 @@
#include "nouveau_drm.h"
#include "nouveau_hwmon.h"
-#include <subdev/gpio.h>
-#include <subdev/timer.h>
-#include <subdev/therm.h>
-
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
static ssize_t
nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int temp = therm->temp_get(therm);
if (temp < 0)
@@ -70,7 +66,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
@@ -82,7 +78,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -103,7 +99,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
@@ -115,7 +111,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -135,7 +131,7 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK) * 1000);
@@ -146,7 +142,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -166,7 +162,7 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
@@ -177,7 +173,7 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -198,7 +194,7 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL) * 1000);
@@ -210,7 +206,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -231,7 +227,7 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
@@ -244,7 +240,7 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -264,7 +260,7 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
@@ -276,7 +272,7 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -297,7 +293,7 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n",
therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
@@ -310,7 +306,7 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -350,7 +346,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
}
@@ -363,7 +359,7 @@ nouveau_hwmon_get_pwm1_enable(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret;
ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MODE);
@@ -379,7 +375,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
int ret;
@@ -402,7 +398,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret;
ret = therm->fan_get(therm);
@@ -418,7 +414,7 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret = -ENODEV;
long value;
@@ -442,7 +438,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret;
ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MIN_DUTY);
@@ -458,7 +454,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
int ret;
@@ -482,7 +478,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
int ret;
ret = therm->attr_get(therm, NOUVEAU_THERM_ATTR_FAN_MAX_DUTY);
@@ -498,7 +494,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
{
struct drm_device *dev = dev_get_drvdata(d);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
long value;
int ret;
@@ -565,7 +561,7 @@ nouveau_hwmon_init(struct drm_device *dev)
{
#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_therm *therm = nouveau_therm(drm->device);
+ struct nouveau_therm *therm = nvkm_therm(&drm->device);
struct nouveau_hwmon *hwmon;
struct device *hwmon_dev;
int ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
new file mode 100644
index 000000000000..47ca88623753
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+/*******************************************************************************
+ * NVIF client driver - NVKM directly linked
+ ******************************************************************************/
+
+#include <core/client.h>
+#include <core/notify.h>
+#include <core/ioctl.h>
+
+#include <nvif/client.h>
+#include <nvif/driver.h>
+#include <nvif/notify.h>
+#include <nvif/event.h>
+#include <nvif/ioctl.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_usif.h"
+
+static void
+nvkm_client_unmap(void *priv, void *ptr, u32 size)
+{
+ iounmap(ptr);
+}
+
+static void *
+nvkm_client_map(void *priv, u64 handle, u32 size)
+{
+ return ioremap(handle, size);
+}
+
+static int
+nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
+{
+ return nvkm_ioctl(priv, super, data, size, hack);
+}
+
+static int
+nvkm_client_resume(void *priv)
+{
+ return nouveau_client_init(priv);
+}
+
+static int
+nvkm_client_suspend(void *priv)
+{
+ return nouveau_client_fini(priv, true);
+}
+
+static void
+nvkm_client_fini(void *priv)
+{
+ struct nouveau_object *client = priv;
+ nouveau_client_fini(nv_client(client), false);
+ atomic_set(&client->refcount, 1);
+ nouveau_object_ref(NULL, &client);
+}
+
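+/*
+ * Dispatch an incoming NVKM notification to the NVIF (kernel) or USIF
+ * (userspace) handler, based on the route byte in the request header.
+ */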
+static int
+nvkm_client_ntfy(const void *header, u32 length, const void *data, u32 size)
+{
+ const union {
+ struct nvif_notify_req_v0 v0;
+ } *args = header;
+ u8 route;
+
+ if (length == sizeof(args->v0) && args->v0.version == 0) {
+ route = args->v0.route;
+ } else {
+ WARN_ON(1);
+ return NVKM_NOTIFY_DROP;
+ }
+
+ switch (route) {
+ case NVDRM_NOTIFY_NVIF:
+ return nvif_notify(header, length, data, size);
+ case NVDRM_NOTIFY_USIF:
+ return usif_notify(header, length, data, size);
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return NVKM_NOTIFY_DROP;
+}
+
+static int
+nvkm_client_init(const char *name, u64 device, const char *cfg,
+ const char *dbg, void **ppriv)
+{
+ struct nouveau_client *client;
+ int ret;
+
+ ret = nouveau_client_create(name, device, cfg, dbg, &client);
+ *ppriv = client;
+ if (ret)
+ return ret;
+
+ client->ntfy = nvkm_client_ntfy;
+ return 0;
+}
+
+const struct nvif_driver
+nvif_driver_nvkm = {
+ .name = "nvkm",
+ .init = nvkm_client_init,
+ .fini = nvkm_client_fini,
+ .suspend = nvkm_client_suspend,
+ .resume = nvkm_client_resume,
+ .ioctl = nvkm_client_ioctl,
+ .map = nvkm_client_map,
+ .unmap = nvkm_client_unmap,
+ .keep = false,
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
new file mode 100644
index 000000000000..0ffeb50d0088
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+#include <soc/tegra/pmc.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_platform.h"
+
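+/*
+ * Power-up sequence for the Tegra-hosted GPU: enable the VDD supply and
+ * clocks, assert reset, remove the 3D powergate clamp, then release the
+ * GPU from reset.
+ */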
+static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
+{
+ int err;
+
+ err = regulator_enable(gpu->vdd);
+ if (err)
+ goto err_power;
+
+ err = clk_prepare_enable(gpu->clk);
+ if (err)
+ goto err_clk;
+ err = clk_prepare_enable(gpu->clk_pwr);
+ if (err)
+ goto err_clk_pwr;
+ clk_set_rate(gpu->clk_pwr, 204000000);
+ udelay(10);
+
+ reset_control_assert(gpu->rst);
+ udelay(10);
+
+ err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
+ if (err)
+ goto err_clamp;
+ udelay(10);
+
+ reset_control_deassert(gpu->rst);
+ udelay(10);
+
+ return 0;
+
+err_clamp:
+ clk_disable_unprepare(gpu->clk_pwr);
+err_clk_pwr:
+ clk_disable_unprepare(gpu->clk);
+err_clk:
+ regulator_disable(gpu->vdd);
+err_power:
+ return err;
+}
+
+static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
+{
+ int err;
+
+ reset_control_assert(gpu->rst);
+ udelay(10);
+
+ clk_disable_unprepare(gpu->clk_pwr);
+ clk_disable_unprepare(gpu->clk);
+ udelay(10);
+
+ err = regulator_disable(gpu->vdd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int nouveau_platform_probe(struct platform_device *pdev)
+{
+ struct nouveau_platform_gpu *gpu;
+ struct nouveau_platform_device *device;
+ struct drm_device *drm;
+ int err;
+
+ gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
+ if (!gpu)
+ return -ENOMEM;
+
+ gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(gpu->vdd))
+ return PTR_ERR(gpu->vdd);
+
+ gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
+ if (IS_ERR(gpu->rst))
+ return PTR_ERR(gpu->rst);
+
+ gpu->clk = devm_clk_get(&pdev->dev, "gpu");
+ if (IS_ERR(gpu->clk))
+ return PTR_ERR(gpu->clk);
+
+ gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
+ if (IS_ERR(gpu->clk_pwr))
+ return PTR_ERR(gpu->clk_pwr);
+
+ err = nouveau_platform_power_up(gpu);
+ if (err)
+ return err;
+
+ drm = nouveau_platform_device_create(pdev, &device);
+ if (IS_ERR(drm)) {
+ err = PTR_ERR(drm);
+ goto power_down;
+ }
+
+ device->gpu = gpu;
+
+ err = drm_dev_register(drm, 0);
+ if (err < 0)
+ goto err_unref;
+
+ return 0;
+
+err_unref:
+ drm_dev_unref(drm);
+
+ return 0;
+
+power_down:
+ nouveau_platform_power_down(gpu);
+
+ return err;
+}
+
+static int nouveau_platform_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm_dev = platform_get_drvdata(pdev);
+ struct nouveau_device *device = nouveau_dev(drm_dev);
+ struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
+
+ nouveau_drm_device_remove(drm_dev);
+
+ return nouveau_platform_power_down(gpu);
+}
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id nouveau_platform_match[] = {
+ { .compatible = "nvidia,gk20a" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, nouveau_platform_match);
+#endif
+
+struct platform_driver nouveau_platform_driver = {
+ .driver = {
+ .name = "nouveau",
+ .of_match_table = of_match_ptr(nouveau_platform_match),
+ },
+ .probe = nouveau_platform_probe,
+ .remove = nouveau_platform_remove,
+};
+
+module_platform_driver(nouveau_platform_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
new file mode 100644
index 000000000000..91f66504900e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_PLATFORM_H__
+#define __NOUVEAU_PLATFORM_H__
+
+#include "core/device.h"
+
+struct reset_control;
+struct clk;
+struct regulator;
+
+struct nouveau_platform_gpu {
+ struct reset_control *rst;
+ struct clk *clk;
+ struct clk *clk_pwr;
+
+ struct regulator *vdd;
+};
+
+struct nouveau_platform_device {
+ struct nouveau_device device;
+
+ struct nouveau_platform_gpu *gpu;
+};
+
+#define nv_device_to_platform(d) \
+ container_of(d, struct nouveau_platform_device, device)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index a4d22e5eb176..01707e7deaf5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,8 +1,6 @@
#include <linux/pagemap.h>
#include <linux/slab.h>
-#include <subdev/fb.h>
-
#include "nouveau_drm.h"
#include "nouveau_ttm.h"
@@ -104,7 +102,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
return NULL;
nvbe->dev = drm->dev;
- if (nv_device(drm->device)->card_type < NV_50)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
nvbe->ttm.ttm.func = &nv04_sgdma_backend;
else
nvbe->ttm.ttm.func = &nv50_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.c b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
index 75dda2b07176..3c6962d15b26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.c
@@ -22,10 +22,15 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
+#include <nvif/os.h>
+#include <nvif/class.h>
+#include <nvif/ioctl.h>
+
#include "nouveau_sysfs.h"
-#include <core/object.h>
-#include <core/class.h>
+MODULE_PARM_DESC(pstate, "enable sysfs pstate file, which will be moved in the future");
+static int nouveau_pstate;
+module_param_named(pstate, nouveau_pstate, int, 0400);
static inline struct drm_device *
drm_device(struct device *d)
@@ -43,38 +48,42 @@ static ssize_t
nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
{
struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
- struct nv_control_pstate_info info;
+ struct nvif_control_pstate_info_v0 info = {};
size_t cnt = PAGE_SIZE;
char *buf = b;
int ret, i;
- ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_INFO, &info, sizeof(info));
+ ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_INFO,
+ &info, sizeof(info));
if (ret)
return ret;
for (i = 0; i < info.count + 1; i++) {
const s32 state = i < info.count ? i :
- NV_CONTROL_PSTATE_ATTR_STATE_CURRENT;
- struct nv_control_pstate_attr attr = {
+ NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT;
+ struct nvif_control_pstate_attr_v0 attr = {
.state = state,
.index = 0,
};
- ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
- &attr, sizeof(attr));
+ ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_ATTR,
+ &attr, sizeof(attr));
if (ret)
return ret;
if (i < info.count)
snappendf(buf, cnt, "%02x:", attr.state);
else
- snappendf(buf, cnt, "--:");
+ snappendf(buf, cnt, "%s:", info.pwrsrc == 0 ? "DC" :
+ info.pwrsrc == 1 ? "AC" :
+ "--");
attr.index = 0;
do {
attr.state = state;
- ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_ATTR,
- &attr, sizeof(attr));
+ ret = nvif_mthd(&sysfs->ctrl,
+ NVIF_CONTROL_PSTATE_ATTR,
+ &attr, sizeof(attr));
if (ret)
return ret;
@@ -84,9 +93,20 @@ nouveau_sysfs_pstate_get(struct device *d, struct device_attribute *a, char *b)
snappendf(buf, cnt, " %s", attr.unit);
} while (attr.index);
- if ((state >= 0 && info.pstate == state) ||
- (state < 0 && info.ustate < 0))
- snappendf(buf, cnt, " *");
+ if (state >= 0) {
+ if (info.ustate_ac == state)
+ snappendf(buf, cnt, " AC");
+ if (info.ustate_dc == state)
+ snappendf(buf, cnt, " DC");
+ if (info.pstate == state)
+ snappendf(buf, cnt, " *");
+ } else {
+ if (info.ustate_ac < -1)
+ snappendf(buf, cnt, " AC");
+ if (info.ustate_dc < -1)
+ snappendf(buf, cnt, " DC");
+ }
+
snappendf(buf, cnt, "\n");
}
@@ -98,26 +118,36 @@ nouveau_sysfs_pstate_set(struct device *d, struct device_attribute *a,
const char *buf, size_t count)
{
struct nouveau_sysfs *sysfs = nouveau_sysfs(drm_device(d));
- struct nv_control_pstate_user args;
+ struct nvif_control_pstate_user_v0 args = { .pwrsrc = -EINVAL };
long value, ret;
char *tmp;
if ((tmp = strchr(buf, '\n')))
*tmp = '\0';
+ if (!strncasecmp(buf, "dc:", 3)) {
+ args.pwrsrc = 0;
+ buf += 3;
+ } else
+ if (!strncasecmp(buf, "ac:", 3)) {
+ args.pwrsrc = 1;
+ buf += 3;
+ }
+
if (!strcasecmp(buf, "none"))
- args.state = NV_CONTROL_PSTATE_USER_STATE_UNKNOWN;
+ args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN;
else
if (!strcasecmp(buf, "auto"))
- args.state = NV_CONTROL_PSTATE_USER_STATE_PERFMON;
+ args.ustate = NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON;
else {
ret = kstrtol(buf, 16, &value);
if (ret)
return ret;
- args.state = value;
+ args.ustate = value;
}
- ret = nv_exec(sysfs->ctrl, NV_CONTROL_PSTATE_USER, &args, sizeof(args));
+ ret = nvif_mthd(&sysfs->ctrl, NVIF_CONTROL_PSTATE_USER,
+ &args, sizeof(args));
if (ret < 0)
return ret;
@@ -132,11 +162,11 @@ nouveau_sysfs_fini(struct drm_device *dev)
{
struct nouveau_sysfs *sysfs = nouveau_sysfs(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
- if (sysfs->ctrl) {
- device_remove_file(nv_device_base(device), &dev_attr_pstate);
- nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL);
+ if (sysfs && sysfs->ctrl.priv) {
+ device_remove_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate);
+ nvif_object_fini(&sysfs->ctrl);
}
drm->sysfs = NULL;
@@ -147,18 +177,22 @@ int
nouveau_sysfs_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nv_device(drm->device);
+ struct nvif_device *device = &drm->device;
struct nouveau_sysfs *sysfs;
int ret;
+ if (!nouveau_pstate)
+ return 0;
+
sysfs = drm->sysfs = kzalloc(sizeof(*sysfs), GFP_KERNEL);
if (!sysfs)
return -ENOMEM;
- ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, NVDRM_CONTROL,
- NV_CONTROL_CLASS, NULL, 0, &sysfs->ctrl);
+ ret = nvif_object_init(nvif_object(device), NULL, NVDRM_CONTROL,
+ NVIF_IOCTL_NEW_V0_CONTROL, NULL, 0,
+ &sysfs->ctrl);
if (ret == 0)
- device_create_file(nv_device_base(device), &dev_attr_pstate);
+ device_create_file(nv_device_base(nvkm_device(device)), &dev_attr_pstate);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sysfs.h b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
index 74b47f1e01ed..f973378160f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sysfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_sysfs.h
@@ -4,7 +4,7 @@
#include "nouveau_drm.h"
struct nouveau_sysfs {
- struct nouveau_object *ctrl;
+ struct nvif_object ctrl;
};
static inline struct nouveau_sysfs *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index ab0228f640a5..53874b76b031 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,10 +24,6 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <subdev/fb.h>
-#include <subdev/vm.h>
-#include <subdev/instmem.h>
-
#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
@@ -36,7 +32,7 @@ static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
man->priv = pfb;
return 0;
}
@@ -67,7 +63,7 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
nouveau_mem_node_cleanup(mem->mm_node);
pfb->ram->put(pfb, (struct nouveau_mem **)&mem->mm_node);
}
@@ -76,10 +72,11 @@ static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_mem *node;
u32 size_nc = 0;
@@ -162,6 +159,7 @@ static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -174,14 +172,13 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
node->page_shift = 12;
- switch (nv_device(drm->device)->card_type) {
- case NV_50:
- if (nv_device(drm->device)->chipset != 0x50)
+ switch (drm->device.info.family) {
+ case NV_DEVICE_INFO_V0_TESLA:
+ if (drm->device.info.chipset != 0x50)
node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
break;
- case NV_C0:
- case NV_D0:
- case NV_E0:
+ case NV_DEVICE_INFO_V0_FERMI:
+ case NV_DEVICE_INFO_V0_KEPLER:
node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
break;
default:
@@ -206,12 +203,13 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
nouveau_gart_manager_debug
};
+/*XXX*/
#include <core/subdev/vm/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
- struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
+ struct nouveau_vmmgr *vmm = nvkm_vmmgr(&drm->device);
struct nv04_vmmgr_priv *priv = (void *)vmm;
struct nouveau_vm *vm = NULL;
nouveau_vm_ref(priv->vm, &vm, NULL);
@@ -242,6 +240,7 @@ static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct nouveau_mem *node;
@@ -354,12 +353,11 @@ int
nouveau_ttm_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
- struct nouveau_device *device = nv_device(drm->device);
u32 bits;
int ret;
- bits = nouveau_vmmgr(drm->device)->dma_bits;
- if (nv_device_is_pci(device)) {
+ bits = nvkm_vmmgr(&drm->device)->dma_bits;
+ if (nv_device_is_pci(nvkm_device(&drm->device))) {
if (drm->agp.stat == ENABLED ||
!pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
bits = 32;
@@ -391,8 +389,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
/* VRAM init */
- drm->gem.vram_available = nouveau_fb(drm->device)->ram->size;
- drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
+ drm->gem.vram_available = drm->device.info.ram_user;
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
@@ -401,12 +398,12 @@ nouveau_ttm_init(struct nouveau_drm *drm)
return ret;
}
- drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(device, 1),
- nv_device_resource_len(device, 1));
+ drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvkm_device(&drm->device), 1),
+ nv_device_resource_len(nvkm_device(&drm->device), 1));
/* GART init */
if (drm->agp.stat != ENABLED) {
- drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
+ drm->gem.gart_available = nvkm_vmmgr(&drm->device)->limit;
} else {
drm->gem.gart_available = drm->agp.size;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
new file mode 100644
index 000000000000..cb1182d7e80e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "nouveau_drm.h"
+#include "nouveau_usif.h"
+
+#include <nvif/notify.h>
+#include <nvif/unpack.h>
+#include <nvif/client.h>
+#include <nvif/event.h>
+#include <nvif/ioctl.h>
+
+struct usif_notify_p {
+ struct drm_pending_event base;
+ struct {
+ struct drm_event base;
+ u8 data[];
+ } e;
+};
+
+struct usif_notify {
+ struct list_head head;
+ atomic_t enabled;
+ u32 handle;
+ u16 reply;
+ u8 route;
+ u64 token;
+ struct usif_notify_p *p;
+};
+
+static inline struct usif_notify *
+usif_notify_find(struct drm_file *filp, u32 handle)
+{
+ struct nouveau_cli *cli = nouveau_cli(filp);
+ struct usif_notify *ntfy;
+ list_for_each_entry(ntfy, &cli->notifys, head) {
+ if (ntfy->handle == handle)
+ return ntfy;
+ }
+ return NULL;
+}
+
+static inline void
+usif_notify_dtor(struct usif_notify *ntfy)
+{
+ list_del(&ntfy->head);
+ kfree(ntfy);
+}
+
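+/*
+ * Deliver a notification to userspace: copy the header and payload into
+ * the pre-allocated DRM event and queue it on the file's event list.
+ */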
+int
+usif_notify(const void *header, u32 length, const void *data, u32 size)
+{
+ struct usif_notify *ntfy = NULL;
+ const union {
+ struct nvif_notify_rep_v0 v0;
+ } *rep = header;
+ struct drm_device *dev;
+ struct drm_file *filp;
+ unsigned long flags;
+
+ if (length == sizeof(rep->v0) && rep->v0.version == 0) {
+ if (WARN_ON(!(ntfy = (void *)(unsigned long)rep->v0.token)))
+ return NVIF_NOTIFY_DROP;
+ BUG_ON(rep->v0.route != NVDRM_NOTIFY_USIF);
+ } else
+ if (WARN_ON(1))
+ return NVIF_NOTIFY_DROP;
+
+ if (WARN_ON(!ntfy->p || ntfy->reply != (length + size)))
+ return NVIF_NOTIFY_DROP;
+ filp = ntfy->p->base.file_priv;
+ dev = filp->minor->dev;
+
+ memcpy(&ntfy->p->e.data[0], header, length);
+ memcpy(&ntfy->p->e.data[length], data, size);
+ switch (rep->v0.version) {
+ case 0: {
+ struct nvif_notify_rep_v0 *rep = (void *)ntfy->p->e.data;
+ rep->route = ntfy->route;
+ rep->token = ntfy->token;
+ }
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (!WARN_ON(filp->event_space < ntfy->p->e.base.length)) {
+ list_add_tail(&ntfy->p->base.link, &filp->event_list);
+ filp->event_space -= ntfy->p->e.base.length;
+ }
+ wake_up_interruptible(&filp->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ atomic_set(&ntfy->enabled, 0);
+ return NVIF_NOTIFY_DROP;
+}
+
+static int
+usif_notify_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_ntfy_new_v0 v0;
+ } *args = data;
+ union {
+ struct nvif_notify_req_v0 v0;
+ } *req;
+ struct usif_notify *ntfy;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ if (usif_notify_find(f, args->v0.index))
+ return -EEXIST;
+ } else
+ return ret;
+ req = data;
+
+ if (!(ntfy = kmalloc(sizeof(*ntfy), GFP_KERNEL)))
+ return -ENOMEM;
+ atomic_set(&ntfy->enabled, 0);
+
+ if (nvif_unpack(req->v0, 0, 0, true)) {
+ ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply;
+ ntfy->route = req->v0.route;
+ ntfy->token = req->v0.token;
+ req->v0.route = NVDRM_NOTIFY_USIF;
+ req->v0.token = (unsigned long)(void *)ntfy;
+ ret = nvif_client_ioctl(client, argv, argc);
+ req->v0.token = ntfy->token;
+ req->v0.route = ntfy->route;
+ ntfy->handle = args->v0.index;
+ }
+
+ if (ret == 0)
+ list_add(&ntfy->head, &cli->notifys);
+ if (ret)
+ kfree(ntfy);
+ return ret;
+}
+
+static int
+usif_notify_del(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_ntfy_del_v0 v0;
+ } *args = data;
+ struct usif_notify *ntfy;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ if (!(ntfy = usif_notify_find(f, args->v0.index)))
+ return -ENOENT;
+ } else
+ return ret;
+
+ ret = nvif_client_ioctl(client, argv, argc);
+ if (ret == 0)
+ usif_notify_dtor(ntfy);
+ return ret;
+}
+
+static int
+usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_ntfy_del_v0 v0;
+ } *args = data;
+ struct usif_notify *ntfy;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ if (!(ntfy = usif_notify_find(f, args->v0.index)))
+ return -ENOENT;
+ } else
+ return ret;
+
+ if (atomic_xchg(&ntfy->enabled, 1))
+ return 0;
+
+ ntfy->p = kmalloc(sizeof(*ntfy->p) + ntfy->reply, GFP_KERNEL);
+ if (ret = -ENOMEM, !ntfy->p)
+ goto done;
+ ntfy->p->base.event = &ntfy->p->e.base;
+ ntfy->p->base.file_priv = f;
+ ntfy->p->base.pid = current->pid;
+ ntfy->p->base.destroy = (void(*)(struct drm_pending_event *))kfree;
+ ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
+ ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
+
+ ret = nvif_client_ioctl(client, argv, argc);
+done:
+ if (ret) {
+ atomic_set(&ntfy->enabled, 0);
+ kfree(ntfy->p);
+ }
+ return ret;
+}
+
+static int
+usif_notify_put(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_ntfy_put_v0 v0;
+ } *args = data;
+ struct usif_notify *ntfy;
+ int ret;
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ if (!(ntfy = usif_notify_find(f, args->v0.index)))
+ return -ENOENT;
+ } else
+ return ret;
+
+ ret = nvif_client_ioctl(client, argv, argc);
+ if (ret == 0 && atomic_xchg(&ntfy->enabled, 0))
+ kfree(ntfy->p);
+ return ret;
+}
+
+struct usif_object {
+ struct list_head head;
+ struct list_head ntfy;
+ u8 route;
+ u64 token;
+};
+
+static void
+usif_object_dtor(struct usif_object *object)
+{
+ list_del(&object->head);
+ kfree(object);
+}
+
+static int
+usif_object_new(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(f);
+ struct nvif_client *client = &cli->base;
+ union {
+ struct nvif_ioctl_new_v0 v0;
+ } *args = data;
+ struct usif_object *object;
+ int ret;
+
+ if (!(object = kmalloc(sizeof(*object), GFP_KERNEL)))
+ return -ENOMEM;
+ list_add(&object->head, &cli->objects);
+
+ if (nvif_unpack(args->v0, 0, 0, true)) {
+ object->route = args->v0.route;
+ object->token = args->v0.token;
+ args->v0.route = NVDRM_OBJECT_USIF;
+ args->v0.token = (unsigned long)(void *)object;
+ ret = nvif_client_ioctl(client, argv, argc);
+ args->v0.token = object->token;
+ args->v0.route = object->route;
+ }
+
+ if (ret)
+ usif_object_dtor(object);
+ return ret;
+}
+
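+/*
+ * Entry point for NVIF ioctls submitted through the DRM fd.  Object and
+ * notify requests are intercepted so that their route/token can be
+ * swapped for kernel-side tracking before reaching the client.
+ */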
+int
+usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
+{
+ struct nouveau_cli *cli = nouveau_cli(filp);
+ struct nvif_client *client = &cli->base;
+ void *data = kmalloc(argc, GFP_KERNEL);
+ u32 size = argc;
+ union {
+ struct nvif_ioctl_v0 v0;
+ } *argv = data;
+ struct usif_object *object;
+ u8 owner;
+ int ret;
+
+ if (ret = -ENOMEM, !argv)
+ goto done;
+ if (ret = -EFAULT, copy_from_user(argv, user, size))
+ goto done;
+
+ if (nvif_unpack(argv->v0, 0, 0, true)) {
+ /* block access to objects not created via this interface */
+ owner = argv->v0.owner;
+ argv->v0.owner = NVDRM_OBJECT_USIF;
+ } else
+ goto done;
+
+ mutex_lock(&cli->mutex);
+ switch (argv->v0.type) {
+ case NVIF_IOCTL_V0_NEW:
+ /* ... except if we're creating children */
+ argv->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
+ ret = usif_object_new(filp, data, size, argv, argc);
+ break;
+ case NVIF_IOCTL_V0_NTFY_NEW:
+ ret = usif_notify_new(filp, data, size, argv, argc);
+ break;
+ case NVIF_IOCTL_V0_NTFY_DEL:
+ ret = usif_notify_del(filp, data, size, argv, argc);
+ break;
+ case NVIF_IOCTL_V0_NTFY_GET:
+ ret = usif_notify_get(filp, data, size, argv, argc);
+ break;
+ case NVIF_IOCTL_V0_NTFY_PUT:
+ ret = usif_notify_put(filp, data, size, argv, argc);
+ break;
+ default:
+ ret = nvif_client_ioctl(client, argv, argc);
+ break;
+ }
+ if (argv->v0.route == NVDRM_OBJECT_USIF) {
+ object = (void *)(unsigned long)argv->v0.token;
+ argv->v0.route = object->route;
+ argv->v0.token = object->token;
+ if (ret == 0 && argv->v0.type == NVIF_IOCTL_V0_DEL) {
+ list_del(&object->head);
+ kfree(object);
+ }
+ } else {
+ argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN;
+ argv->v0.token = 0;
+ }
+ argv->v0.owner = owner;
+ mutex_unlock(&cli->mutex);
+
+ if (copy_to_user(user, argv, argc))
+ ret = -EFAULT;
+done:
+ kfree(argv);
+ return ret;
+}
+
+void
+usif_client_fini(struct nouveau_cli *cli)
+{
+ struct usif_object *object, *otemp;
+ struct usif_notify *notify, *ntemp;
+
+ list_for_each_entry_safe(notify, ntemp, &cli->notifys, head) {
+ usif_notify_dtor(notify);
+ }
+
+ list_for_each_entry_safe(object, otemp, &cli->objects, head) {
+ usif_object_dtor(object);
+ }
+}
+
+void
+usif_client_init(struct nouveau_cli *cli)
+{
+ INIT_LIST_HEAD(&cli->objects);
+ INIT_LIST_HEAD(&cli->notifys);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h
new file mode 100644
index 000000000000..c037e3ae8c70
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.h
@@ -0,0 +1,9 @@
+#ifndef __NOUVEAU_USIF_H__
+#define __NOUVEAU_USIF_H__
+
+void usif_client_init(struct nouveau_cli *);
+void usif_client_fini(struct nouveau_cli *);
+int usif_ioctl(struct drm_file *, void __user *, u32);
+int usif_notify(const void *, u32, const void *, u32);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 4f4c3fec6916..18d55d447248 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -12,14 +12,16 @@
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
- struct nouveau_device *device = nouveau_dev(priv);
+ struct nvif_device *device = &nouveau_drm(priv)->device;
- if (device->card_type == NV_40 && device->chipset >= 0x4c)
- nv_wr32(device, 0x088060, state);
- else if (device->chipset >= 0x40)
- nv_wr32(device, 0x088054, state);
+ if (device->info.family == NV_DEVICE_INFO_V0_CURIE &&
+ device->info.chipset >= 0x4c)
+ nvif_wr32(device, 0x088060, state);
else
- nv_wr32(device, 0x001854, state);
+ if (device->info.chipset >= 0x40)
+ nvif_wr32(device, 0x088054, state);
+ else
+ nvif_wr32(device, 0x001854, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 8fe32bbed99a..4ef602c5469d 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,8 +22,6 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <core/object.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
@@ -141,8 +139,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
- struct nouveau_device *device = nv_device(drm->device);
- struct nouveau_object *object;
+ struct nvif_device *device = &drm->device;
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
@@ -174,35 +171,35 @@ nv04_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D,
- device->card_type >= NV_10 ? 0x0062 : 0x0042,
- NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x0062,
+ device->info.family >= NV_DEVICE_INFO_V0_CELSIUS ?
+ 0x0062 : 0x0042, NULL, 0, &nfbdev->surf2d);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect,
- 0x0019, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x0019, 0x0019, NULL, 0,
+ &nfbdev->clip);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop,
- 0x0043, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x0043, 0x0043, NULL, 0,
+ &nfbdev->rop);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt,
- 0x0044, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x0044, 0x0044, NULL, 0,
+ &nfbdev->patt);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect,
- 0x004a, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x004a, 0x004a, NULL, 0,
+ &nfbdev->gdi);
if (ret)
return ret;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit,
- device->chipset >= 0x11 ? 0x009f : 0x005f,
- NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x005f,
+ device->info.chipset >= 0x11 ? 0x009f : 0x005f,
+ NULL, 0, &nfbdev->blit);
if (ret)
return ret;
@@ -212,10 +209,10 @@ nv04_fbcon_accel_init(struct fb_info *info)
}
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, NvCtxSurf2D);
+ OUT_RING(chan, nfbdev->surf2d.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
- OUT_RING(chan, NvDmaFB);
- OUT_RING(chan, NvDmaFB);
+ OUT_RING(chan, chan->vram.handle);
+ OUT_RING(chan, chan->vram.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
OUT_RING(chan, surface_fmt);
OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
@@ -223,12 +220,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, NvRop);
+ OUT_RING(chan, nfbdev->rop.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
OUT_RING(chan, 0x55);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, NvImagePatt);
+ OUT_RING(chan, nfbdev->patt.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
@@ -244,18 +241,18 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, ~0);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
- OUT_RING(chan, NvClipRect);
+ OUT_RING(chan, nfbdev->clip.handle);
BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
OUT_RING(chan, 0);
OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
BEGIN_NV04(chan, NvSubImageBlit, 0x0000, 1);
- OUT_RING(chan, NvImageBlit);
+ OUT_RING(chan, nfbdev->blit.handle);
BEGIN_NV04(chan, NvSubImageBlit, 0x019c, 1);
- OUT_RING(chan, NvCtxSurf2D);
+ OUT_RING(chan, nfbdev->surf2d.handle);
BEGIN_NV04(chan, NvSubImageBlit, 0x02fc, 1);
OUT_RING(chan, 3);
- if (device->chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
+ if (device->info.chipset >= 0x11 /*XXX: oclass == 0x009f*/) {
BEGIN_NV04(chan, NvSubImageBlit, 0x0120, 3);
OUT_RING(chan, 0);
OUT_RING(chan, 1);
@@ -263,12 +260,12 @@ nv04_fbcon_accel_init(struct fb_info *info)
}
BEGIN_NV04(chan, NvSubGdiRect, 0x0000, 1);
- OUT_RING(chan, NvGdiRect);
+ OUT_RING(chan, nfbdev->gdi.handle);
BEGIN_NV04(chan, NvSubGdiRect, 0x0198, 1);
- OUT_RING(chan, NvCtxSurf2D);
+ OUT_RING(chan, nfbdev->surf2d.handle);
BEGIN_NV04(chan, NvSubGdiRect, 0x0188, 2);
- OUT_RING(chan, NvImagePatt);
- OUT_RING(chan, NvRop);
+ OUT_RING(chan, nfbdev->patt.handle);
+ OUT_RING(chan, nfbdev->rop.handle);
BEGIN_NV04(chan, NvSubGdiRect, 0x0304, 1);
OUT_RING(chan, 1);
BEGIN_NV04(chan, NvSubGdiRect, 0x0300, 1);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 94eadd1dd10a..239c2c5a9615 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,8 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <engine/fifo.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -59,7 +57,7 @@ nv04_fence_sync(struct nouveau_fence *fence,
static u32
nv04_fence_read(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nouveau_fifo_chan *fifo = nvkm_fifo_chan(chan);
return atomic_read(&fifo->refcnt);
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 06f434f03fba..4faaf0acf5d7 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,9 +22,6 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include <core/object.h>
-#include <core/class.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
@@ -53,14 +50,18 @@ nv10_fence_sync(struct nouveau_fence *fence,
u32
nv10_fence_read(struct nouveau_channel *chan)
{
- return nv_ro32(chan->object, 0x0048);
+ return nvif_rd32(chan, 0x0048);
}
void
nv10_fence_context_del(struct nouveau_channel *chan)
{
struct nv10_fence_chan *fctx = chan->fence;
+ int i;
nouveau_fence_context_del(&fctx->base);
+ for (i = 0; i < ARRAY_SIZE(fctx->head); i++)
+ nvif_object_fini(&fctx->head[i]);
+ nvif_object_fini(&fctx->sema);
chan->fence = NULL;
kfree(fctx);
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index e5d9204826c2..a87259f3983a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -1,12 +1,13 @@
#ifndef __NV10_FENCE_H_
#define __NV10_FENCE_H_
-#include <core/os.h>
#include "nouveau_fence.h"
#include "nouveau_bo.h"
struct nv10_fence_chan {
struct nouveau_fence_chan base;
+ struct nvif_object sema;
+ struct nvif_object head[4];
};
struct nv10_fence_priv {
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 22aa9963ea6f..ca907479f92f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -22,8 +22,8 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include <core/object.h>
-#include <core/class.h>
+#include <nvif/os.h>
+#include <nvif/class.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -33,11 +33,13 @@ int
nv17_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
+ struct nouveau_cli *cli = (void *)nvif_client(&prev->device->base);
struct nv10_fence_priv *priv = chan->drm->fence;
+ struct nv10_fence_chan *fctx = chan->fence;
u32 value;
int ret;
- if (!mutex_trylock(&prev->cli->mutex))
+ if (!mutex_trylock(&cli->mutex))
return -EBUSY;
spin_lock(&priv->lock);
@@ -48,7 +50,7 @@ nv17_fence_sync(struct nouveau_fence *fence,
ret = RING_SPACE(prev, 5);
if (!ret) {
BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (prev, NvSema);
+ OUT_RING (prev, fctx->sema.handle);
OUT_RING (prev, 0);
OUT_RING (prev, value + 0);
OUT_RING (prev, value + 1);
@@ -57,14 +59,14 @@ nv17_fence_sync(struct nouveau_fence *fence,
if (!ret && !(ret = RING_SPACE(chan, 5))) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
- OUT_RING (chan, NvSema);
+ OUT_RING (chan, fctx->sema.handle);
OUT_RING (chan, 0);
OUT_RING (chan, value + 1);
OUT_RING (chan, value + 2);
FIRE_RING (chan);
}
- mutex_unlock(&prev->cli->mutex);
+ mutex_unlock(&cli->mutex);
return 0;
}
@@ -74,7 +76,6 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
u32 limit = start + mem->size - 1;
int ret = 0;
@@ -88,15 +89,14 @@ nv17_fence_context_new(struct nouveau_channel *chan)
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_FROM_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
- }, sizeof(struct nv_dma_class),
- &object);
+ }, sizeof(struct nv_dma_v0),
+ &fctx->sema);
if (ret)
nv10_fence_context_del(chan);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 4c534b7b04da..03949eaa629f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -28,6 +28,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
+#include <nvif/class.h>
+
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
@@ -37,15 +39,6 @@
#include "nouveau_fence.h"
#include "nv50_display.h"
-#include <core/client.h>
-#include <core/gpuobj.h>
-#include <core/class.h>
-
-#include <subdev/timer.h>
-#include <subdev/bar.h>
-#include <subdev/fb.h>
-#include <subdev/i2c.h>
-
#define EVO_DMA_NR 9
#define EVO_MASTER (0x00)
@@ -60,45 +53,34 @@
#define EVO_FLIP_SEM0(c) EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c) EVO_SYNC((c) + 1, 0x10)
-#define EVO_CORE_HANDLE (0xd1500000)
-#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
-#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
-#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \
- (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
-
/******************************************************************************
* EVO channel
*****************************************************************************/
struct nv50_chan {
- struct nouveau_object *user;
- u32 handle;
+ struct nvif_object user;
};
static int
-nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
+nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
void *data, u32 size, struct nv50_chan *chan)
{
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
- const u32 handle = EVO_CHAN_HANDLE(bclass, head);
- int ret;
-
- ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
- oclass, data, size, &chan->user);
- if (ret)
- return ret;
-
- chan->handle = handle;
- return 0;
+ while (oclass[0]) {
+ int ret = nvif_object_init(disp, NULL, (oclass[0] << 16) | head,
+ oclass[0], data, size,
+ &chan->user);
+ if (oclass++, ret == 0) {
+ nvif_object_map(&chan->user);
+ return ret;
+ }
+ }
+ return -ENOSYS;
}
static void
-nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
+nv50_chan_destroy(struct nv50_chan *chan)
{
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- if (chan->handle)
- nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
+ nvif_object_fini(&chan->user);
}
/******************************************************************************
@@ -110,16 +92,70 @@ struct nv50_pioc {
};
static void
-nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
+nv50_pioc_destroy(struct nv50_pioc *pioc)
{
- nv50_chan_destroy(core, &pioc->base);
+ nv50_chan_destroy(&pioc->base);
}
static int
-nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
+nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
void *data, u32 size, struct nv50_pioc *pioc)
{
- return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
+ return nv50_chan_create(disp, oclass, head, data, size, &pioc->base);
+}
+
+/******************************************************************************
+ * Cursor Immediate
+ *****************************************************************************/
+
+struct nv50_curs {
+ struct nv50_pioc base;
+};
+
+static int
+nv50_curs_create(struct nvif_object *disp, int head, struct nv50_curs *curs)
+{
+ struct nv50_disp_cursor_v0 args = {
+ .head = head,
+ };
+ static const u32 oclass[] = {
+ GK104_DISP_CURSOR,
+ GF110_DISP_CURSOR,
+ GT214_DISP_CURSOR,
+ G82_DISP_CURSOR,
+ NV50_DISP_CURSOR,
+ 0
+ };
+
+ return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
+ &curs->base);
+}
+
+/******************************************************************************
+ * Overlay Immediate
+ *****************************************************************************/
+
+struct nv50_oimm {
+ struct nv50_pioc base;
+};
+
+static int
+nv50_oimm_create(struct nvif_object *disp, int head, struct nv50_oimm *oimm)
+{
+ struct nv50_disp_cursor_v0 args = {
+ .head = head,
+ };
+ static const u32 oclass[] = {
+ GK104_DISP_OVERLAY,
+ GF110_DISP_OVERLAY,
+ GT214_DISP_OVERLAY,
+ G82_DISP_OVERLAY,
+ NV50_DISP_OVERLAY,
+ 0
+ };
+
+ return nv50_pioc_create(disp, oclass, head, &args, sizeof(args),
+ &oimm->base);
}
/******************************************************************************
@@ -131,6 +167,9 @@ struct nv50_dmac {
dma_addr_t handle;
u32 *ptr;
+ struct nvif_object sync;
+ struct nvif_object vram;
+
/* Protects against concurrent pushbuf access to this channel, lock is
* grabbed by evo_wait (if the pushbuf reservation is successful) and
* dropped again by evo_kick. */
@@ -138,207 +177,113 @@ struct nv50_dmac {
};
static void
-nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
+nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
+ nvif_object_fini(&dmac->vram);
+ nvif_object_fini(&dmac->sync);
+
+ nv50_chan_destroy(&dmac->base);
+
if (dmac->ptr) {
- struct pci_dev *pdev = nv_device(core)->pdev;
+ struct pci_dev *pdev = nvkm_device(nvif_device(disp))->pdev;
pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
}
-
- nv50_chan_destroy(core, &dmac->base);
-}
-
-static int
-nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
-{
- struct nouveau_fb *pfb = nouveau_fb(core);
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- struct nouveau_object *object;
- int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NV50_DMA_CONF0_ENABLE |
- NV50_DMA_CONF0_PART_256,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB16,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
- NV50_DMA_CONF0_PART_256,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB32,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
- NV50_DMA_CONF0_PART_256,
- }, sizeof(struct nv_dma_class), &object);
- return ret;
-}
-
-static int
-nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
-{
- struct nouveau_fb *pfb = nouveau_fb(core);
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- struct nouveau_object *object;
- int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVC0_DMA_CONF0_ENABLE,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB16,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB32,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
- }, sizeof(struct nv_dma_class), &object);
- return ret;
-}
-
-static int
-nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
-{
- struct nouveau_fb *pfb = nouveau_fb(core);
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- struct nouveau_object *object;
- int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVD0_DMA_CONF0_ENABLE |
- NVD0_DMA_CONF0_PAGE_LP,
- }, sizeof(struct nv_dma_class), &object);
- if (ret)
- return ret;
-
- ret = nouveau_object_new(client, parent, NvEvoFB32,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
- .start = 0,
- .limit = pfb->ram->size - 1,
- .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
- NVD0_DMA_CONF0_PAGE_LP,
- }, sizeof(struct nv_dma_class), &object);
- return ret;
}
static int
-nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+nv50_dmac_create(struct nvif_object *disp, const u32 *oclass, u8 head,
void *data, u32 size, u64 syncbuf,
struct nv50_dmac *dmac)
{
- struct nouveau_fb *pfb = nouveau_fb(core);
- struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
- struct nouveau_object *object;
- u32 pushbuf = *(u32 *)data;
+ struct nvif_device *device = nvif_device(disp);
+ struct nv50_disp_core_channel_dma_v0 *args = data;
+ struct nvif_object pushbuf;
int ret;
mutex_init(&dmac->lock);
- dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
- &dmac->handle);
+ dmac->ptr = pci_alloc_consistent(nvkm_device(device)->pdev,
+ PAGE_SIZE, &dmac->handle);
if (!dmac->ptr)
return -ENOMEM;
- ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
- NV_DMA_FROM_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_PCI_US |
- NV_DMA_ACCESS_RD,
+ ret = nvif_object_init(nvif_object(device), NULL,
+ args->pushbuf, NV_DMA_FROM_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_PCI_US,
+ .access = NV_DMA_V0_ACCESS_RD,
.start = dmac->handle + 0x0000,
.limit = dmac->handle + 0x0fff,
- }, sizeof(struct nv_dma_class), &object);
+ }, sizeof(struct nv_dma_v0), &pushbuf);
if (ret)
return ret;
- ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+ ret = nv50_chan_create(disp, oclass, head, data, size, &dmac->base);
+ nvif_object_fini(&pushbuf);
if (ret)
return ret;
- ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000000,
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = syncbuf + 0x0000,
.limit = syncbuf + 0x0fff,
- }, sizeof(struct nv_dma_class), &object);
+ }, sizeof(struct nv_dma_v0),
+ &dmac->sync);
if (ret)
return ret;
- ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
- NV_DMA_IN_MEMORY_CLASS,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(&dmac->base.user, NULL, 0xf0000001,
+ NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = 0,
- .limit = pfb->ram->size - 1,
- }, sizeof(struct nv_dma_class), &object);
+ .limit = device->info.ram_user - 1,
+ }, sizeof(struct nv_dma_v0),
+ &dmac->vram);
if (ret)
return ret;
- if (nv_device(core)->card_type < NV_C0)
- ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
- else
- if (nv_device(core)->card_type < NV_D0)
- ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
- else
- ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
return ret;
}
+/******************************************************************************
+ * Core
+ *****************************************************************************/
+
struct nv50_mast {
struct nv50_dmac base;
};
-struct nv50_curs {
- struct nv50_pioc base;
-};
+static int
+nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
+{
+ struct nv50_disp_core_channel_dma_v0 args = {
+ .pushbuf = 0xb0007d00,
+ };
+ static const u32 oclass[] = {
+ GM107_DISP_CORE_CHANNEL_DMA,
+ GK110_DISP_CORE_CHANNEL_DMA,
+ GK104_DISP_CORE_CHANNEL_DMA,
+ GF110_DISP_CORE_CHANNEL_DMA,
+ GT214_DISP_CORE_CHANNEL_DMA,
+ GT206_DISP_CORE_CHANNEL_DMA,
+ GT200_DISP_CORE_CHANNEL_DMA,
+ G82_DISP_CORE_CHANNEL_DMA,
+ NV50_DISP_CORE_CHANNEL_DMA,
+ 0
+ };
+
+ return nv50_dmac_create(disp, oclass, 0, &args, sizeof(args), syncbuf,
+ &core->base);
+}
+
+/******************************************************************************
+ * Base
+ *****************************************************************************/
struct nv50_sync {
struct nv50_dmac base;
@@ -346,13 +291,58 @@ struct nv50_sync {
u32 data;
};
+static int
+nv50_base_create(struct nvif_object *disp, int head, u64 syncbuf,
+ struct nv50_sync *base)
+{
+ struct nv50_disp_base_channel_dma_v0 args = {
+ .pushbuf = 0xb0007c00 | head,
+ .head = head,
+ };
+ static const u32 oclass[] = {
+ GK110_DISP_BASE_CHANNEL_DMA,
+ GK104_DISP_BASE_CHANNEL_DMA,
+ GF110_DISP_BASE_CHANNEL_DMA,
+ GT214_DISP_BASE_CHANNEL_DMA,
+ GT200_DISP_BASE_CHANNEL_DMA,
+ G82_DISP_BASE_CHANNEL_DMA,
+ NV50_DISP_BASE_CHANNEL_DMA,
+ 0
+ };
+
+ return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+ syncbuf, &base->base);
+}
+
+/******************************************************************************
+ * Overlay
+ *****************************************************************************/
+
struct nv50_ovly {
struct nv50_dmac base;
};
-struct nv50_oimm {
- struct nv50_pioc base;
-};
+static int
+nv50_ovly_create(struct nvif_object *disp, int head, u64 syncbuf,
+ struct nv50_ovly *ovly)
+{
+ struct nv50_disp_overlay_channel_dma_v0 args = {
+ .pushbuf = 0xb0007e00 | head,
+ .head = head,
+ };
+ static const u32 oclass[] = {
+ GK104_DISP_OVERLAY_CONTROL_DMA,
+ GF110_DISP_OVERLAY_CONTROL_DMA,
+ GT214_DISP_OVERLAY_CHANNEL_DMA,
+ GT200_DISP_OVERLAY_CHANNEL_DMA,
+ G82_DISP_OVERLAY_CHANNEL_DMA,
+ NV50_DISP_OVERLAY_CHANNEL_DMA,
+ 0
+ };
+
+ return nv50_dmac_create(disp, oclass, head, &args, sizeof(args),
+ syncbuf, &ovly->base);
+}
struct nv50_head {
struct nouveau_crtc base;
@@ -369,13 +359,19 @@ struct nv50_head {
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
-#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+#define nv50_vers(c) nv50_chan(c)->user.oclass
+
+struct nv50_fbdma {
+ struct list_head head;
+ struct nvif_object core;
+ struct nvif_object base[4];
+};
struct nv50_disp {
- struct nouveau_object *core;
+ struct nvif_object *disp;
struct nv50_mast mast;
- u32 modeset;
+ struct list_head fbdma;
struct nouveau_bo *sync;
};
@@ -401,16 +397,16 @@ static u32 *
evo_wait(void *evoc, int nr)
{
struct nv50_dmac *dmac = evoc;
- u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
+ u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
- nv_wo32(dmac->base.user, 0x0000, 0x00000000);
- if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+ nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
+ if (!nvkm_wait(&dmac->base.user, 0x0004, ~0, 0x00000000)) {
mutex_unlock(&dmac->lock);
- NV_ERROR(dmac->base.user, "channel stalled\n");
+ nv_error(nvkm_object(&dmac->base.user), "channel stalled\n");
return NULL;
}
@@ -424,7 +420,7 @@ static void
evo_kick(u32 *push, void *evoc)
{
struct nv50_dmac *dmac = evoc;
- nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+ nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
}
@@ -443,7 +439,7 @@ evo_sync_wait(void *data)
static int
evo_sync(struct drm_device *dev)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_mast *mast = nv50_mast(dev);
u32 *push = evo_wait(mast, 8);
@@ -455,7 +451,7 @@ evo_sync(struct drm_device *dev)
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
evo_kick(push, mast);
- if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+ if (nv_wait_cb(nvkm_device(device), evo_sync_wait, disp->sync))
return 0;
}
@@ -490,7 +486,7 @@ nv50_display_flip_wait(void *data)
void
nv50_display_flip_stop(struct drm_crtc *crtc)
{
- struct nouveau_device *device = nouveau_dev(crtc->dev);
+ struct nvif_device *device = &nouveau_drm(crtc->dev)->device;
struct nv50_display_flip flip = {
.disp = nv50_disp(crtc->dev),
.chan = nv50_sync(crtc),
@@ -510,7 +506,7 @@ nv50_display_flip_stop(struct drm_crtc *crtc)
evo_kick(push, flip.chan);
}
- nv_wait_cb(device, nv50_display_flip_wait, &flip);
+ nv_wait_cb(nvkm_device(device), nv50_display_flip_wait, &flip);
}
int
@@ -534,7 +530,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (unlikely(push == NULL))
return -EBUSY;
- if (chan && nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
+ if (chan && chan->object->oclass < G82_CHANNEL_GPFIFO) {
ret = RING_SPACE(chan, 8);
if (ret)
return ret;
@@ -548,14 +544,14 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
OUT_RING (chan, sync->addr);
OUT_RING (chan, sync->data);
} else
- if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+ if (chan && chan->object->oclass < FERMI_CHANNEL_GPFIFO) {
u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr;
ret = RING_SPACE(chan, 12);
if (ret)
return ret;
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram);
+ OUT_RING (chan, chan->vram.handle);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(addr ^ 0x10));
OUT_RING (chan, lower_32_bits(addr ^ 0x10));
@@ -606,16 +602,16 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
evo_data(push, sync->addr);
evo_data(push, sync->data++);
evo_data(push, sync->data);
- evo_data(push, NvEvoSync);
+ evo_data(push, sync->base.sync.handle);
evo_mthd(push, 0x00a0, 2);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
evo_mthd(push, 0x00c0, 1);
- evo_data(push, nv_fb->r_dma);
+ evo_data(push, nv_fb->r_handle);
evo_mthd(push, 0x0110, 2);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
- if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+ if (nv50_vers(sync) < GF110_DISP_BASE_CHANNEL_DMA) {
evo_mthd(push, 0x0800, 5);
evo_data(push, nv_fb->nvbo->bo.offset >> 8);
evo_data(push, 0);
@@ -667,11 +663,11 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
push = evo_wait(mast, 4);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
evo_data(push, mode);
} else
- if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GK104_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
evo_data(push, mode);
} else {
@@ -762,7 +758,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
push = evo_wait(mast, 8);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
/*XXX: SCALE_CTRL_ACTIVE??? */
evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
evo_data(push, (oY << 16) | oX);
@@ -807,7 +803,7 @@ nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
push = evo_wait(mast, 16);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
evo_data(push, (hue << 20) | (vib << 8));
} else {
@@ -835,7 +831,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
push = evo_wait(mast, 16);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
evo_data(push, nvfb->nvbo->bo.offset >> 8);
evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
@@ -844,9 +840,9 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
evo_data(push, nvfb->r_format);
evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
evo_data(push, (y << 16) | x);
- if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) > NV50_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nvfb->r_dma);
+ evo_data(push, nvfb->r_handle);
}
} else {
evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
@@ -855,7 +851,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
evo_data(push, (fb->height << 16) | fb->width);
evo_data(push, nvfb->r_pitch);
evo_data(push, nvfb->r_format);
- evo_data(push, nvfb->r_dma);
+ evo_data(push, nvfb->r_handle);
evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
evo_data(push, (y << 16) | x);
}
@@ -867,7 +863,7 @@ nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
evo_kick(push, mast);
}
- nv_crtc->fb.tile_flags = nvfb->r_dma;
+ nv_crtc->fb.handle = nvfb->r_handle;
return 0;
}
@@ -877,23 +873,23 @@ nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
u32 *push = evo_wait(mast, 16);
if (push) {
- if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
} else
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x85000000);
evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
- evo_data(push, NvEvoVRAM);
+ evo_data(push, mast->base.vram.handle);
} else {
evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
evo_data(push, 0x85000000);
evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
+ evo_data(push, mast->base.vram.handle);
}
evo_kick(push, mast);
}
@@ -905,11 +901,11 @@ nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
u32 *push = evo_wait(mast, 16);
if (push) {
- if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x05000000);
} else
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x05000000);
evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
@@ -960,13 +956,13 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
push = evo_wait(mast, 6);
if (push) {
- if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x40000000);
} else
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
@@ -997,31 +993,31 @@ nv50_crtc_commit(struct drm_crtc *crtc)
push = evo_wait(mast, 32);
if (push) {
- if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, NvEvoVRAM_LP);
+ evo_data(push, nv_crtc->fb.handle);
evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0xc0000000);
evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
} else
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
- evo_data(push, nv_crtc->fb.tile_flags);
+ evo_data(push, nv_crtc->fb.handle);
evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0xc0000000);
evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
- evo_data(push, NvEvoVRAM);
+ evo_data(push, mast->base.vram.handle);
} else {
evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
- evo_data(push, nv_crtc->fb.tile_flags);
+ evo_data(push, nv_crtc->fb.handle);
evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
evo_data(push, 0x83000000);
evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
- evo_data(push, NvEvoVRAM);
+ evo_data(push, mast->base.vram.handle);
evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0xffffff00);
}
@@ -1099,7 +1095,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
push = evo_wait(mast, 64);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
evo_data(push, 0x00800000 | mode->clock);
evo_data(push, (ilace == 2) ? 2 : 0);
@@ -1192,7 +1188,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
u16 g = nv_crtc->lut.g[i] >> 2;
u16 b = nv_crtc->lut.b[i] >> 2;
- if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+ if (disp->disp->oclass < GF110_DISP) {
writew(r + 0x0000, lut + (i * 0x08) + 0);
writew(g + 0x0000, lut + (i * 0x08) + 2);
writew(b + 0x0000, lut + (i * 0x08) + 4);
@@ -1259,8 +1255,8 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct nv50_curs *curs = nv50_curs(crtc);
struct nv50_chan *chan = nv50_chan(curs);
- nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
- nv_wo32(chan->user, 0x0080, 0x00000000);
+ nvif_wr32(&chan->user, 0x0084, (y << 16) | (x & 0xffff));
+ nvif_wr32(&chan->user, 0x0080, 0x00000000);
return 0;
}
@@ -1287,11 +1283,16 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
+ struct nv50_fbdma *fbdma;
+
+ list_for_each_entry(fbdma, &disp->fbdma, head) {
+ nvif_object_fini(&fbdma->base[nv_crtc->index]);
+ }
- nv50_dmac_destroy(disp->core, &head->ovly.base);
- nv50_pioc_destroy(disp->core, &head->oimm.base);
- nv50_dmac_destroy(disp->core, &head->sync.base);
- nv50_pioc_destroy(disp->core, &head->curs.base);
+ nv50_dmac_destroy(&head->ovly.base, disp->disp);
+ nv50_pioc_destroy(&head->oimm.base);
+ nv50_dmac_destroy(&head->sync.base, disp->disp);
+ nv50_pioc_destroy(&head->curs.base);
/*XXX: this shouldn't be necessary, but the core doesn't call
* disconnect() during the cleanup paths
@@ -1346,7 +1347,7 @@ nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
}
static int
-nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+nv50_crtc_create(struct drm_device *dev, int index)
{
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_head *head;
@@ -1395,11 +1396,7 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
nv50_crtc_lut_load(crtc);
/* allocate cursor resources */
- ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
- &(struct nv50_display_curs_class) {
- .head = index,
- }, sizeof(struct nv50_display_curs_class),
- &head->curs.base);
+ ret = nv50_curs_create(disp->disp, index, &head->curs);
if (ret)
goto out;
@@ -1420,12 +1417,8 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
goto out;
/* allocate page flip / sync resources */
- ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
- &(struct nv50_display_sync_class) {
- .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
- .head = index,
- }, sizeof(struct nv50_display_sync_class),
- disp->sync->bo.offset, &head->sync.base);
+ ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
+ &head->sync);
if (ret)
goto out;
@@ -1433,20 +1426,12 @@ nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
head->sync.data = 0x00000000;
/* allocate overlay resources */
- ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
- &(struct nv50_display_oimm_class) {
- .head = index,
- }, sizeof(struct nv50_display_oimm_class),
- &head->oimm.base);
+ ret = nv50_oimm_create(disp->disp, index, &head->oimm);
if (ret)
goto out;
- ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
- &(struct nv50_display_ovly_class) {
- .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
- .head = index,
- }, sizeof(struct nv50_display_ovly_class),
- disp->sync->bo.offset, &head->ovly.base);
+ ret = nv50_ovly_create(disp->disp, index, disp->sync->bo.offset,
+ &head->ovly);
if (ret)
goto out;
@@ -1464,16 +1449,23 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
- int or = nv_encoder->or;
- u32 dpms_ctrl;
-
- dpms_ctrl = 0x00000000;
- if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000001;
- if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
- dpms_ctrl |= 0x00000004;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_dac_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_DAC_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ .pwr.state = 1,
+ .pwr.data = 1,
+ .pwr.vsync = (mode != DRM_MODE_DPMS_SUSPEND &&
+ mode != DRM_MODE_DPMS_OFF),
+ .pwr.hsync = (mode != DRM_MODE_DPMS_STANDBY &&
+ mode != DRM_MODE_DPMS_OFF),
+ };
- nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
static bool
@@ -1514,7 +1506,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
push = evo_wait(mast, 8);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
u32 syncs = 0x00000000;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
@@ -1563,7 +1555,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
push = evo_wait(mast, 4);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0400 + (or * 0x080), 1);
evo_data(push, 0x00000000);
} else {
@@ -1580,14 +1572,25 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
- int ret, or = nouveau_encoder(encoder)->or;
- u32 load = nouveau_drm(encoder->dev)->vbios.dactestval;
- if (load == 0)
- load = 340;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_dac_load_v0 load;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ };
+ int ret;
+
+ args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
+ if (args.load.data == 0)
+ args.load.data = 340;
- ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
- if (ret || !load)
+ ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ if (ret || !args.load.load)
return connector_status_disconnected;
return connector_status_connected;
@@ -1619,7 +1622,7 @@ static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type = DRM_MODE_ENCODER_DAC;
@@ -1650,16 +1653,25 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_connector *nv_connector;
struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hda_eld_v0 eld;
+ u8 data[sizeof(nv_connector->base.eld)];
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ };
nv_connector = nouveau_encoder_connector_get(nv_encoder);
if (!drm_detect_monitor_audio(nv_connector->edid))
return;
drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+ memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
- nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
- nv_connector->base.eld,
- nv_connector->base.eld[2] * 4);
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
static void
@@ -1667,8 +1679,17 @@ nv50_audio_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hda_eld_v0 eld;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ };
- nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
/******************************************************************************
@@ -1679,10 +1700,20 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
- struct nouveau_connector *nv_connector;
struct nv50_disp *disp = nv50_disp(encoder->dev);
- const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
- u32 rekey = 56; /* binary driver, and tegra constant */
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hdmi_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
+ .pwr.state = 1,
+ .pwr.rekey = 56, /* binary driver, and tegra, constant */
+ };
+ struct nouveau_connector *nv_connector;
u32 max_ac_packet;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
@@ -1690,14 +1721,11 @@ nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
return;
max_ac_packet = mode->htotal - mode->hdisplay;
- max_ac_packet -= rekey;
+ max_ac_packet -= args.pwr.rekey;
max_ac_packet -= 18; /* constant from tegra */
- max_ac_packet /= 32;
-
- nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
- NV84_DISP_SOR_HDMI_PWR_STATE_ON |
- (max_ac_packet << 16) | rekey);
+ args.pwr.max_ac_packet = max_ac_packet / 32;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
nv50_audio_mode_set(encoder, mode);
}
@@ -1706,11 +1734,20 @@ nv50_hdmi_disconnect(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
- const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_hdmi_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
+ (0x0100 << nv_crtc->index),
+ };
nv50_audio_disconnect(encoder);
- nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
/******************************************************************************
@@ -1720,10 +1757,29 @@ static void
nv50_sor_dpms(struct drm_encoder *encoder, int mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ .pwr.state = mode == DRM_MODE_DPMS_ON,
+ };
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_dp_pwr_v0 pwr;
+ } link = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_DP_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ .pwr.state = mode == DRM_MODE_DPMS_ON,
+ };
struct drm_device *dev = encoder->dev;
- struct nv50_disp *disp = nv50_disp(dev);
struct drm_encoder *partner;
- u32 mthd;
nv_encoder->last_dpms = mode;
@@ -1741,18 +1797,13 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
}
}
- mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3;
- mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
- mthd |= nv_encoder->or;
-
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- nv_call(disp->core, NV50_DISP_SOR_PWR | mthd, 1);
- mthd |= NV94_DISP_SOR_DP_PWR;
+ args.pwr.state = 1;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
+ nvif_mthd(disp->disp, 0, &link, sizeof(link));
} else {
- mthd |= NV50_DISP_SOR_PWR;
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
-
- nv_call(disp->core, mthd, (mode == DRM_MODE_DPMS_ON));
}
static bool
@@ -1781,7 +1832,7 @@ nv50_sor_ctrl(struct nouveau_encoder *nv_encoder, u32 mask, u32 data)
struct nv50_mast *mast = nv50_mast(nv_encoder->base.base.dev);
u32 temp = (nv_encoder->ctrl & ~mask) | (data & mask), *push;
if (temp != nv_encoder->ctrl && (push = evo_wait(mast, 2))) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
evo_data(push, (nv_encoder->ctrl = temp));
} else {
@@ -1817,15 +1868,24 @@ static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
struct drm_display_mode *mode)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_sor_lvds_script_v0 lvds;
+ } lvds = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ };
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nv50_mast *mast = nv50_mast(encoder->dev);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
- u32 lvds = 0, mask, ctrl;
+ u32 mask, ctrl;
u8 owner = 1 << nv_crtc->index;
u8 proto = 0xf;
u8 depth = 0x0;
@@ -1851,31 +1911,31 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
if (bios->fp_no_ddc) {
if (bios->fp.dual_link)
- lvds |= 0x0100;
+ lvds.lvds.script |= 0x0100;
if (bios->fp.if_is_24bit)
- lvds |= 0x0200;
+ lvds.lvds.script |= 0x0200;
} else {
if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
if (((u8 *)nv_connector->edid)[121] == 2)
- lvds |= 0x0100;
+ lvds.lvds.script |= 0x0100;
} else
if (mode->clock >= bios->fp.duallink_transition_clk) {
- lvds |= 0x0100;
+ lvds.lvds.script |= 0x0100;
}
- if (lvds & 0x0100) {
+ if (lvds.lvds.script & 0x0100) {
if (bios->fp.strapless_is_24bit & 2)
- lvds |= 0x0200;
+ lvds.lvds.script |= 0x0200;
} else {
if (bios->fp.strapless_is_24bit & 1)
- lvds |= 0x0200;
+ lvds.lvds.script |= 0x0200;
}
if (nv_connector->base.display_info.bpc == 8)
- lvds |= 0x0200;
+ lvds.lvds.script |= 0x0200;
}
- nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
+ nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
if (nv_connector->base.display_info.bpc == 6) {
@@ -1902,7 +1962,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nv50_sor_dpms(&nv_encoder->base.base, DRM_MODE_DPMS_ON);
- if (nv50_vers(mast) >= NVD0_DISP_CLASS) {
+ if (nv50_vers(mast) >= GF110_DISP) {
u32 *push = evo_wait(mast, 3);
if (push) {
u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
@@ -1961,7 +2021,7 @@ static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type;
@@ -2002,9 +2062,19 @@ nv50_pior_dpms(struct drm_encoder *encoder, int mode)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
- u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or;
- u32 ctrl = (mode == DRM_MODE_DPMS_ON);
- nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl);
+ struct {
+ struct nv50_disp_mthd_v1 base;
+ struct nv50_disp_pior_pwr_v0 pwr;
+ } args = {
+ .base.version = 1,
+ .base.method = NV50_DISP_MTHD_V1_PIOR_PWR,
+ .base.hasht = nv_encoder->dcb->hasht,
+ .base.hashm = nv_encoder->dcb->hashm,
+ .pwr.state = mode == DRM_MODE_DPMS_ON,
+ .pwr.type = nv_encoder->dcb->type,
+ };
+
+ nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
static bool
@@ -2067,7 +2137,7 @@ nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
push = evo_wait(mast, 8);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
u32 ctrl = (depth << 16) | (proto << 8) | owner;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl |= 0x00001000;
@@ -2096,7 +2166,7 @@ nv50_pior_disconnect(struct drm_encoder *encoder)
push = evo_wait(mast, 4);
if (push) {
- if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+ if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0700 + (or * 0x040), 1);
evo_data(push, 0x00000000);
}
@@ -2132,7 +2202,7 @@ static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
+ struct nouveau_i2c *i2c = nvkm_i2c(&drm->device);
struct nouveau_i2c_port *ddc = NULL;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
@@ -2169,8 +2239,151 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
}
/******************************************************************************
+ * Framebuffer
+ *****************************************************************************/
+
+static void
+nv50_fbdma_fini(struct nv50_fbdma *fbdma)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(fbdma->base); i++)
+ nvif_object_fini(&fbdma->base[i]);
+ nvif_object_fini(&fbdma->core);
+ list_del(&fbdma->head);
+ kfree(fbdma);
+}
+
+static int
+nv50_fbdma_init(struct drm_device *dev, u32 name, u64 offset, u64 length, u8 kind)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_mast *mast = nv50_mast(dev);
+ struct __attribute__ ((packed)) {
+ struct nv_dma_v0 base;
+ union {
+ struct nv50_dma_v0 nv50;
+ struct gf100_dma_v0 gf100;
+ struct gf110_dma_v0 gf110;
+ };
+ } args = {};
+ struct nv50_fbdma *fbdma;
+ struct drm_crtc *crtc;
+ u32 size = sizeof(args.base);
+ int ret;
+
+ list_for_each_entry(fbdma, &disp->fbdma, head) {
+ if (fbdma->core.handle == name)
+ return 0;
+ }
+
+ fbdma = kzalloc(sizeof(*fbdma), GFP_KERNEL);
+ if (!fbdma)
+ return -ENOMEM;
+ list_add(&fbdma->head, &disp->fbdma);
+
+ args.base.target = NV_DMA_V0_TARGET_VRAM;
+ args.base.access = NV_DMA_V0_ACCESS_RDWR;
+ args.base.start = offset;
+ args.base.limit = offset + length - 1;
+
+ if (drm->device.info.chipset < 0x80) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ size += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xc0) {
+ args.nv50.part = NV50_DMA_V0_PART_256;
+ args.nv50.kind = kind;
+ size += sizeof(args.nv50);
+ } else
+ if (drm->device.info.chipset < 0xd0) {
+ args.gf100.kind = kind;
+ size += sizeof(args.gf100);
+ } else {
+ args.gf110.page = GF110_DMA_V0_PAGE_LP;
+ args.gf110.kind = kind;
+ size += sizeof(args.gf110);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nv50_head *head = nv50_head(crtc);
+ int ret = nvif_object_init(&head->sync.base.base.user, NULL,
+ name, NV_DMA_IN_MEMORY, &args, size,
+ &fbdma->base[head->base.index]);
+ if (ret) {
+ nv50_fbdma_fini(fbdma);
+ return ret;
+ }
+ }
+
+ ret = nvif_object_init(&mast->base.base.user, NULL, name,
+ NV_DMA_IN_MEMORY, &args, size,
+ &fbdma->core);
+ if (ret) {
+ nv50_fbdma_fini(fbdma);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+nv50_fb_dtor(struct drm_framebuffer *fb)
+{
+}
+
+static int
+nv50_fb_ctor(struct drm_framebuffer *fb)
+{
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nouveau_drm *drm = nouveau_drm(fb->dev);
+ struct nouveau_bo *nvbo = nv_fb->nvbo;
+ struct nv50_disp *disp = nv50_disp(fb->dev);
+ u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
+ u8 tile = nvbo->tile_mode;
+
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
+ NV_ERROR(drm, "framebuffer requires contiguous bo\n");
+ return -EINVAL;
+ }
+
+ if (drm->device.info.chipset >= 0xc0)
+ tile >>= 4; /* yep.. */
+
+ switch (fb->depth) {
+ case 8: nv_fb->r_format = 0x1e00; break;
+ case 15: nv_fb->r_format = 0xe900; break;
+ case 16: nv_fb->r_format = 0xe800; break;
+ case 24:
+ case 32: nv_fb->r_format = 0xcf00; break;
+ case 30: nv_fb->r_format = 0xd100; break;
+ default:
+ NV_ERROR(drm, "unknown depth %d\n", fb->depth);
+ return -EINVAL;
+ }
+
+ if (disp->disp->oclass < G82_DISP) {
+ nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
+ (fb->pitches[0] | 0x00100000);
+ nv_fb->r_format |= kind << 16;
+ } else
+ if (disp->disp->oclass < GF110_DISP) {
+ nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
+ (fb->pitches[0] | 0x00100000);
+ } else {
+ nv_fb->r_pitch = kind ? (((fb->pitches[0] / 4) << 4) | tile) :
+ (fb->pitches[0] | 0x01000000);
+ }
+ nv_fb->r_handle = 0xffff0000 | kind;
+
+ return nv50_fbdma_init(fb->dev, nv_fb->r_handle, 0,
+ drm->device.info.ram_user, kind);
+}
+
+/******************************************************************************
* Init
*****************************************************************************/
+
void
nv50_display_fini(struct drm_device *dev)
{
@@ -2193,7 +2406,7 @@ nv50_display_init(struct drm_device *dev)
}
evo_mthd(push, 0x0088, 1);
- evo_data(push, NvEvoSync);
+ evo_data(push, nv50_mast(dev)->base.sync.handle);
evo_kick(push, nv50_mast(dev));
return 0;
}
@@ -2202,8 +2415,13 @@ void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
+ struct nv50_fbdma *fbdma, *fbtmp;
+
+ list_for_each_entry_safe(fbdma, fbtmp, &disp->fbdma, head) {
+ nv50_fbdma_fini(fbdma);
+ }
- nv50_dmac_destroy(disp->core, &disp->mast.base);
+ nv50_dmac_destroy(&disp->mast.base, disp->disp);
nouveau_bo_unmap(disp->sync);
if (disp->sync)
@@ -2217,7 +2435,7 @@ nv50_display_destroy(struct drm_device *dev)
int
nv50_display_create(struct drm_device *dev)
{
- struct nouveau_device *device = nouveau_dev(dev);
+ struct nvif_device *device = &nouveau_drm(dev)->device;
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *tmp;
@@ -2228,12 +2446,15 @@ nv50_display_create(struct drm_device *dev)
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
+ INIT_LIST_HEAD(&disp->fbdma);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
- disp->core = nouveau_display(dev)->core;
+ nouveau_display(dev)->fb_ctor = nv50_fb_ctor;
+ nouveau_display(dev)->fb_dtor = nv50_fb_dtor;
+ disp->disp = &nouveau_display(dev)->disp;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -2253,22 +2474,19 @@ nv50_display_create(struct drm_device *dev)
goto out;
/* allocate master evo channel */
- ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
- &(struct nv50_display_mast_class) {
- .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
- }, sizeof(struct nv50_display_mast_class),
- disp->sync->bo.offset, &disp->mast.base);
+ ret = nv50_core_create(disp->disp, disp->sync->bo.offset,
+ &disp->mast);
if (ret)
goto out;
/* create crtc objects to represent the hw heads */
- if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
- crtcs = nv_rd32(device, 0x022448);
+ if (disp->disp->oclass >= GF110_DISP)
+ crtcs = nvif_rd32(device, 0x022448);
else
crtcs = 2;
for (i = 0; i < crtcs; i++) {
- ret = nv50_crtc_create(dev, disp->core, i);
+ ret = nv50_crtc_create(dev, i);
if (ret)
goto out;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 52068a0910dc..394c89abcc97 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -154,7 +154,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = nfbdev->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
- struct nouveau_object *object;
int ret, format;
switch (info->var.bits_per_pixel) {
@@ -184,8 +183,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
- 0x502d, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x502d, 0x502d, NULL, 0,
+ &nfbdev->twod);
if (ret)
return ret;
@@ -196,11 +195,11 @@ nv50_fbcon_accel_init(struct fb_info *info)
}
BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
- OUT_RING(chan, Nv2D);
+ OUT_RING(chan, nfbdev->twod.handle);
BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
- OUT_RING(chan, NvDmaFB);
- OUT_RING(chan, NvDmaFB);
- OUT_RING(chan, NvDmaFB);
+ OUT_RING(chan, chan->vram.handle);
+ OUT_RING(chan, chan->vram.handle);
+ OUT_RING(chan, chan->vram.handle);
BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
OUT_RING(chan, 0);
BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 0ee363840035..195cf51a7c31 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -22,8 +22,8 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include <core/object.h>
-#include <core/class.h>
+#include <nvif/os.h>
+#include <nvif/class.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
@@ -38,7 +38,6 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
u32 limit = start + mem->size - 1;
int ret, i;
@@ -52,15 +51,14 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.read = nv10_fence_read;
fctx->base.sync = nv17_fence_sync;
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x003d,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(chan->object, NULL, NvSema, NV_DMA_IN_MEMORY,
+ &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
- }, sizeof(struct nv_dma_class),
- &object);
+ }, sizeof(struct nv_dma_v0),
+ &fctx->sema);
/* dma objects for display sync channel semaphore blocks */
for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
@@ -68,15 +66,14 @@ nv50_fence_context_new(struct nouveau_channel *chan)
u32 start = bo->bo.mem.start * PAGE_SIZE;
u32 limit = start + bo->bo.mem.size - 1;
- ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvEvoSema0 + i, 0x003d,
- &(struct nv_dma_class) {
- .flags = NV_DMA_TARGET_VRAM |
- NV_DMA_ACCESS_RDWR,
+ ret = nvif_object_init(chan->object, NULL, NvEvoSema0 + i,
+ NV_DMA_IN_MEMORY, &(struct nv_dma_v0) {
+ .target = NV_DMA_V0_TARGET_VRAM,
+ .access = NV_DMA_V0_ACCESS_RDWR,
.start = start,
.limit = limit,
- }, sizeof(struct nv_dma_class),
- &object);
+ }, sizeof(struct nv_dma_v0),
+ &fctx->head[i]);
}
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 9fd475c89820..933a779c93ab 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,12 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <core/object.h>
-#include <core/client.h>
-#include <core/class.h>
-
-#include <engine/fifo.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -47,7 +41,7 @@ nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
int ret = RING_SPACE(chan, 8);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram);
+ OUT_RING (chan, chan->vram.handle);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
OUT_RING (chan, upper_32_bits(virtual));
OUT_RING (chan, lower_32_bits(virtual));
@@ -65,7 +59,7 @@ nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
- OUT_RING (chan, chan->vram);
+ OUT_RING (chan, chan->vram.handle);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
OUT_RING (chan, upper_32_bits(virtual));
OUT_RING (chan, lower_32_bits(virtual));
@@ -81,8 +75,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct nv84_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- u64 addr = fifo->chid * 16;
+ u64 addr = chan->chid * 16;
if (fence->sysmem)
addr += fctx->vma_gart.offset;
@@ -97,8 +90,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
struct nv84_fence_chan *fctx = chan->fence;
- struct nouveau_fifo_chan *fifo = (void *)prev->object;
- u64 addr = fifo->chid * 16;
+ u64 addr = prev->chid * 16;
if (fence->sysmem)
addr += fctx->vma_gart.offset;
@@ -111,9 +103,8 @@ nv84_fence_sync(struct nouveau_fence *fence,
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
struct nv84_fence_priv *priv = chan->drm->fence;
- return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
+ return nouveau_bo_rd32(priv->bo, chan->chid * 16/4);
}
static void
@@ -139,8 +130,7 @@ nv84_fence_context_del(struct nouveau_channel *chan)
int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct nouveau_fifo_chan *fifo = (void *)chan->object;
- struct nouveau_client *client = nouveau_client(fifo);
+ struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
int ret, i;
@@ -156,19 +146,19 @@ nv84_fence_context_new(struct nouveau_channel *chan)
fctx->base.emit32 = nv84_fence_emit32;
fctx->base.sync32 = nv84_fence_sync32;
- ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+ ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
if (ret == 0) {
- ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
+ ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
&fctx->vma_gart);
}
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+ ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
}
- nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
+ nouveau_bo_wr32(priv->bo, chan->chid * 16/4, 0x00000000);
if (ret)
nv84_fence_context_del(chan);
@@ -178,7 +168,7 @@ nv84_fence_context_new(struct nouveau_channel *chan)
static bool
nv84_fence_suspend(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
struct nv84_fence_priv *priv = drm->fence;
int i;
@@ -194,7 +184,7 @@ nv84_fence_suspend(struct nouveau_drm *drm)
static void
nv84_fence_resume(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
struct nv84_fence_priv *priv = drm->fence;
int i;
@@ -225,7 +215,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
int
nv84_fence_create(struct nouveau_drm *drm)
{
- struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
struct nv84_fence_priv *priv;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 9dcd30f3e1e0..61246677e8dc 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -154,11 +154,10 @@ nvc0_fbcon_accel_init(struct fb_info *info)
struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
- struct nouveau_object *object;
int ret, format;
- ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
- 0x902d, NULL, 0, &object);
+ ret = nvif_object_init(chan->object, NULL, 0x902d, 0x902d, NULL, 0,
+ &nfbdev->twod);
if (ret)
return ret;
@@ -197,7 +196,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
}
BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
- OUT_RING (chan, 0x0000902d);
+ OUT_RING (chan, nfbdev->twod.handle);
BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0);
BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 9566267fbc42..becf19abda2d 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,12 +22,6 @@
* Authors: Ben Skeggs
*/
-#include <core/object.h>
-#include <core/client.h>
-#include <core/class.h>
-
-#include <engine/fifo.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h
new file mode 100644
index 000000000000..cc81e0e5fd30
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/class.h
@@ -0,0 +1,558 @@
+#ifndef __NVIF_CLASS_H__
+#define __NVIF_CLASS_H__
+
+/*******************************************************************************
+ * class identifiers
+ ******************************************************************************/
+
+/* the below match nvidia-assigned (either in hw, or sw) class numbers */
+#define NV_DEVICE 0x00000080
+
+#define NV_DMA_FROM_MEMORY 0x00000002
+#define NV_DMA_TO_MEMORY 0x00000003
+#define NV_DMA_IN_MEMORY 0x0000003d
+
+#define NV04_DISP 0x00000046
+
+#define NV03_CHANNEL_DMA 0x0000006b
+#define NV10_CHANNEL_DMA 0x0000006e
+#define NV17_CHANNEL_DMA 0x0000176e
+#define NV40_CHANNEL_DMA 0x0000406e
+#define NV50_CHANNEL_DMA 0x0000506e
+#define G82_CHANNEL_DMA 0x0000826e
+
+#define NV50_CHANNEL_GPFIFO 0x0000506f
+#define G82_CHANNEL_GPFIFO 0x0000826f
+#define FERMI_CHANNEL_GPFIFO 0x0000906f
+#define KEPLER_CHANNEL_GPFIFO_A 0x0000a06f
+
+#define NV50_DISP 0x00005070
+#define G82_DISP 0x00008270
+#define GT200_DISP 0x00008370
+#define GT214_DISP 0x00008570
+#define GT206_DISP 0x00008870
+#define GF110_DISP 0x00009070
+#define GK104_DISP 0x00009170
+#define GK110_DISP 0x00009270
+#define GM107_DISP 0x00009470
+
+#define NV50_DISP_CURSOR 0x0000507a
+#define G82_DISP_CURSOR 0x0000827a
+#define GT214_DISP_CURSOR 0x0000857a
+#define GF110_DISP_CURSOR 0x0000907a
+#define GK104_DISP_CURSOR 0x0000917a
+
+#define NV50_DISP_OVERLAY 0x0000507b
+#define G82_DISP_OVERLAY 0x0000827b
+#define GT214_DISP_OVERLAY 0x0000857b
+#define GF110_DISP_OVERLAY 0x0000907b
+#define GK104_DISP_OVERLAY 0x0000917b
+
+#define NV50_DISP_BASE_CHANNEL_DMA 0x0000507c
+#define G82_DISP_BASE_CHANNEL_DMA 0x0000827c
+#define GT200_DISP_BASE_CHANNEL_DMA 0x0000837c
+#define GT214_DISP_BASE_CHANNEL_DMA 0x0000857c
+#define GF110_DISP_BASE_CHANNEL_DMA 0x0000907c
+#define GK104_DISP_BASE_CHANNEL_DMA 0x0000917c
+#define GK110_DISP_BASE_CHANNEL_DMA 0x0000927c
+
+#define NV50_DISP_CORE_CHANNEL_DMA 0x0000507d
+#define G82_DISP_CORE_CHANNEL_DMA 0x0000827d
+#define GT200_DISP_CORE_CHANNEL_DMA 0x0000837d
+#define GT214_DISP_CORE_CHANNEL_DMA 0x0000857d
+#define GT206_DISP_CORE_CHANNEL_DMA 0x0000887d
+#define GF110_DISP_CORE_CHANNEL_DMA 0x0000907d
+#define GK104_DISP_CORE_CHANNEL_DMA 0x0000917d
+#define GK110_DISP_CORE_CHANNEL_DMA 0x0000927d
+#define GM107_DISP_CORE_CHANNEL_DMA 0x0000947d
+
+#define NV50_DISP_OVERLAY_CHANNEL_DMA 0x0000507e
+#define G82_DISP_OVERLAY_CHANNEL_DMA 0x0000827e
+#define GT200_DISP_OVERLAY_CHANNEL_DMA 0x0000837e
+#define GT214_DISP_OVERLAY_CHANNEL_DMA 0x0000857e
+#define GF110_DISP_OVERLAY_CONTROL_DMA 0x0000907e
+#define GK104_DISP_OVERLAY_CONTROL_DMA 0x0000917e
+
+#define FERMI_A 0x00009097
+#define FERMI_B 0x00009197
+#define FERMI_C 0x00009297
+
+#define KEPLER_A 0x0000a097
+#define KEPLER_B 0x0000a197
+#define KEPLER_C 0x0000a297
+
+#define MAXWELL_A 0x0000b097
+
+#define FERMI_COMPUTE_A 0x000090c0
+#define FERMI_COMPUTE_B 0x000091c0
+
+#define KEPLER_COMPUTE_A 0x0000a0c0
+#define KEPLER_COMPUTE_B 0x0000a1c0
+
+#define MAXWELL_COMPUTE_A 0x0000b0c0
+
+
+/*******************************************************************************
+ * client
+ ******************************************************************************/
+
+#define NV_CLIENT_DEVLIST 0x00
+
+struct nv_client_devlist_v0 {
+ __u8 version;
+ __u8 count;
+ __u8 pad02[6];
+ __u64 device[];
+};
+
+
+/*******************************************************************************
+ * device
+ ******************************************************************************/
+
+struct nv_device_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u64 device; /* device identifier, ~0 for client default */
+#define NV_DEVICE_V0_DISABLE_IDENTIFY 0x0000000000000001ULL
+#define NV_DEVICE_V0_DISABLE_MMIO 0x0000000000000002ULL
+#define NV_DEVICE_V0_DISABLE_VBIOS 0x0000000000000004ULL
+#define NV_DEVICE_V0_DISABLE_CORE 0x0000000000000008ULL
+#define NV_DEVICE_V0_DISABLE_DISP 0x0000000000010000ULL
+#define NV_DEVICE_V0_DISABLE_FIFO 0x0000000000020000ULL
+#define NV_DEVICE_V0_DISABLE_GRAPH 0x0000000100000000ULL
+#define NV_DEVICE_V0_DISABLE_MPEG 0x0000000200000000ULL
+#define NV_DEVICE_V0_DISABLE_ME 0x0000000400000000ULL
+#define NV_DEVICE_V0_DISABLE_VP 0x0000000800000000ULL
+#define NV_DEVICE_V0_DISABLE_CRYPT 0x0000001000000000ULL
+#define NV_DEVICE_V0_DISABLE_BSP 0x0000002000000000ULL
+#define NV_DEVICE_V0_DISABLE_PPP 0x0000004000000000ULL
+#define NV_DEVICE_V0_DISABLE_COPY0 0x0000008000000000ULL
+#define NV_DEVICE_V0_DISABLE_COPY1 0x0000010000000000ULL
+#define NV_DEVICE_V0_DISABLE_VIC 0x0000020000000000ULL
+#define NV_DEVICE_V0_DISABLE_VENC 0x0000040000000000ULL
+ __u64 disable; /* disable particular subsystems */
+ __u64 debug0; /* as above, but *internal* ids, and *NOT* ABI */
+};
+
+#define NV_DEVICE_V0_INFO 0x00
+
+struct nv_device_info_v0 {
+ __u8 version;
+#define NV_DEVICE_INFO_V0_IGP 0x00
+#define NV_DEVICE_INFO_V0_PCI 0x01
+#define NV_DEVICE_INFO_V0_AGP 0x02
+#define NV_DEVICE_INFO_V0_PCIE 0x03
+#define NV_DEVICE_INFO_V0_SOC 0x04
+ __u8 platform;
+ __u16 chipset; /* from NV_PMC_BOOT_0 */
+ __u8 revision; /* from NV_PMC_BOOT_0 */
+#define NV_DEVICE_INFO_V0_TNT 0x01
+#define NV_DEVICE_INFO_V0_CELSIUS 0x02
+#define NV_DEVICE_INFO_V0_KELVIN 0x03
+#define NV_DEVICE_INFO_V0_RANKINE 0x04
+#define NV_DEVICE_INFO_V0_CURIE 0x05
+#define NV_DEVICE_INFO_V0_TESLA 0x06
+#define NV_DEVICE_INFO_V0_FERMI 0x07
+#define NV_DEVICE_INFO_V0_KEPLER 0x08
+#define NV_DEVICE_INFO_V0_MAXWELL 0x09
+ __u8 family;
+ __u8 pad06[2];
+ __u64 ram_size;
+ __u64 ram_user;
+};
+
+
+/*******************************************************************************
+ * context dma
+ ******************************************************************************/
+
+struct nv_dma_v0 {
+ __u8 version;
+#define NV_DMA_V0_TARGET_VM 0x00
+#define NV_DMA_V0_TARGET_VRAM 0x01
+#define NV_DMA_V0_TARGET_PCI 0x02
+#define NV_DMA_V0_TARGET_PCI_US 0x03
+#define NV_DMA_V0_TARGET_AGP 0x04
+ __u8 target;
+#define NV_DMA_V0_ACCESS_VM 0x00
+#define NV_DMA_V0_ACCESS_RD 0x01
+#define NV_DMA_V0_ACCESS_WR 0x02
+#define NV_DMA_V0_ACCESS_RDWR (NV_DMA_V0_ACCESS_RD | NV_DMA_V0_ACCESS_WR)
+ __u8 access;
+ __u8 pad03[5];
+ __u64 start;
+ __u64 limit;
+ /* ... chipset-specific class data */
+};
+
+struct nv50_dma_v0 {
+ __u8 version;
+#define NV50_DMA_V0_PRIV_VM 0x00
+#define NV50_DMA_V0_PRIV_US 0x01
+#define NV50_DMA_V0_PRIV__S 0x02
+ __u8 priv;
+#define NV50_DMA_V0_PART_VM 0x00
+#define NV50_DMA_V0_PART_256 0x01
+#define NV50_DMA_V0_PART_1KB 0x02
+ __u8 part;
+#define NV50_DMA_V0_COMP_NONE 0x00
+#define NV50_DMA_V0_COMP_1 0x01
+#define NV50_DMA_V0_COMP_2 0x02
+#define NV50_DMA_V0_COMP_VM 0x03
+ __u8 comp;
+#define NV50_DMA_V0_KIND_PITCH 0x00
+#define NV50_DMA_V0_KIND_VM 0x7f
+ __u8 kind;
+ __u8 pad05[3];
+};
+
+struct gf100_dma_v0 {
+ __u8 version;
+#define GF100_DMA_V0_PRIV_VM 0x00
+#define GF100_DMA_V0_PRIV_US 0x01
+#define GF100_DMA_V0_PRIV__S 0x02
+ __u8 priv;
+#define GF100_DMA_V0_KIND_PITCH 0x00
+#define GF100_DMA_V0_KIND_VM 0xff
+ __u8 kind;
+ __u8 pad03[5];
+};
+
+struct gf110_dma_v0 {
+ __u8 version;
+#define GF110_DMA_V0_PAGE_LP 0x00
+#define GF110_DMA_V0_PAGE_SP 0x01
+ __u8 page;
+#define GF110_DMA_V0_KIND_PITCH 0x00
+#define GF110_DMA_V0_KIND_VM 0xff
+ __u8 kind;
+ __u8 pad03[5];
+};
+
+
+/*******************************************************************************
+ * perfmon
+ ******************************************************************************/
+
+struct nvif_perfctr_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u16 logic_op;
+ __u8 pad04[4];
+ char name[4][64];
+};
+
+#define NVIF_PERFCTR_V0_QUERY 0x00
+#define NVIF_PERFCTR_V0_SAMPLE 0x01
+#define NVIF_PERFCTR_V0_READ 0x02
+
+struct nvif_perfctr_query_v0 {
+ __u8 version;
+ __u8 pad01[3];
+ __u32 iter;
+ char name[64];
+};
+
+struct nvif_perfctr_sample {
+};
+
+struct nvif_perfctr_read_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u32 ctr;
+ __u32 clk;
+};
+
+
+/*******************************************************************************
+ * device control
+ ******************************************************************************/
+
+#define NVIF_CONTROL_PSTATE_INFO 0x00
+#define NVIF_CONTROL_PSTATE_ATTR 0x01
+#define NVIF_CONTROL_PSTATE_USER 0x02
+
+struct nvif_control_pstate_info_v0 {
+ __u8 version;
+ __u8 count; /* out: number of power states */
+#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE (-1)
+#define NVIF_CONTROL_PSTATE_INFO_V0_USTATE_PERFMON (-2)
+ __s8 ustate_ac; /* out: target pstate index */
+ __s8 ustate_dc; /* out: target pstate index */
+ __s8 pwrsrc; /* out: current power source */
+#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN (-1)
+#define NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_PERFMON (-2)
+ __s8 pstate; /* out: current pstate index */
+ __u8 pad06[2];
+};
+
+struct nvif_control_pstate_attr_v0 {
+ __u8 version;
+#define NVIF_CONTROL_PSTATE_ATTR_V0_STATE_CURRENT (-1)
+ __s8 state; /* in: index of pstate to query
+ * out: pstate identifier
+ */
+ __u8 index; /* in: index of attribute to query
+ * out: index of next attribute, or 0 if no more
+ */
+ __u8 pad03[5];
+ __u32 min;
+ __u32 max;
+ char name[32];
+ char unit[16];
+};
+
+struct nvif_control_pstate_user_v0 {
+ __u8 version;
+#define NVIF_CONTROL_PSTATE_USER_V0_STATE_UNKNOWN (-1)
+#define NVIF_CONTROL_PSTATE_USER_V0_STATE_PERFMON (-2)
+ __s8 ustate; /* in: pstate identifier */
+ __s8 pwrsrc; /* in: target power source */
+ __u8 pad03[5];
+};
+
+
+/*******************************************************************************
+ * DMA FIFO channels
+ ******************************************************************************/
+
+struct nv03_channel_dma_v0 {
+ __u8 version;
+ __u8 chid;
+ __u8 pad02[2];
+ __u32 pushbuf;
+ __u64 offset;
+};
+
+#define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
+
+/*******************************************************************************
+ * GPFIFO channels
+ ******************************************************************************/
+
+struct nv50_channel_gpfifo_v0 {
+ __u8 version;
+ __u8 chid;
+ __u8 pad01[6];
+ __u32 pushbuf;
+ __u32 ilength;
+ __u64 ioffset;
+};
+
+struct kepler_channel_gpfifo_a_v0 {
+ __u8 version;
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR 0x01
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_VP 0x02
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_PPP 0x04
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_BSP 0x08
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 0x10
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1 0x20
+#define KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_ENC 0x40
+ __u8 engine;
+ __u16 chid;
+ __u8 pad04[4];
+ __u32 pushbuf;
+ __u32 ilength;
+ __u64 ioffset;
+};
+
+/*******************************************************************************
+ * legacy display
+ ******************************************************************************/
+
+#define NV04_DISP_NTFY_VBLANK 0x00
+#define NV04_DISP_NTFY_CONN 0x01
+
+struct nv04_disp_mthd_v0 {
+ __u8 version;
+#define NV04_DISP_SCANOUTPOS 0x00
+ __u8 method;
+ __u8 head;
+ __u8 pad03[5];
+};
+
+struct nv04_disp_scanoutpos_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __s64 time[2];
+ __u16 vblanks;
+ __u16 vblanke;
+ __u16 vtotal;
+ __u16 vline;
+ __u16 hblanks;
+ __u16 hblanke;
+ __u16 htotal;
+ __u16 hline;
+};
+
+/*******************************************************************************
+ * display
+ ******************************************************************************/
+
+#define NV50_DISP_MTHD 0x00
+
+struct nv50_disp_mthd_v0 {
+ __u8 version;
+#define NV50_DISP_SCANOUTPOS 0x00
+ __u8 method;
+ __u8 head;
+ __u8 pad03[5];
+};
+
+struct nv50_disp_mthd_v1 {
+ __u8 version;
+#define NV50_DISP_MTHD_V1_DAC_PWR 0x10
+#define NV50_DISP_MTHD_V1_DAC_LOAD 0x11
+#define NV50_DISP_MTHD_V1_SOR_PWR 0x20
+#define NV50_DISP_MTHD_V1_SOR_HDA_ELD 0x21
+#define NV50_DISP_MTHD_V1_SOR_HDMI_PWR 0x22
+#define NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT 0x23
+#define NV50_DISP_MTHD_V1_SOR_DP_PWR 0x24
+#define NV50_DISP_MTHD_V1_PIOR_PWR 0x30
+ __u8 method;
+ __u16 hasht;
+ __u16 hashm;
+ __u8 pad06[2];
+};
+
+struct nv50_disp_dac_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 data;
+ __u8 vsync;
+ __u8 hsync;
+ __u8 pad05[3];
+};
+
+struct nv50_disp_dac_load_v0 {
+ __u8 version;
+ __u8 load;
+ __u16 data;
+ __u8 pad04[4];
+};
+
+struct nv50_disp_sor_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 pad02[6];
+};
+
+struct nv50_disp_sor_hda_eld_v0 {
+ __u8 version;
+ __u8 pad01[7];
+ __u8 data[];
+};
+
+struct nv50_disp_sor_hdmi_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 max_ac_packet;
+ __u8 rekey;
+ __u8 pad04[4];
+};
+
+struct nv50_disp_sor_lvds_script_v0 {
+ __u8 version;
+ __u8 pad01[1];
+ __u16 script;
+ __u8 pad04[4];
+};
+
+struct nv50_disp_sor_dp_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 pad02[6];
+};
+
+struct nv50_disp_pior_pwr_v0 {
+ __u8 version;
+ __u8 state;
+ __u8 type;
+ __u8 pad03[5];
+};
+
+/* core */
+struct nv50_disp_core_channel_dma_v0 {
+ __u8 version;
+ __u8 pad01[3];
+ __u32 pushbuf;
+};
+
+/* cursor immediate */
+struct nv50_disp_cursor_v0 {
+ __u8 version;
+ __u8 head;
+ __u8 pad02[6];
+};
+
+/* base */
+struct nv50_disp_base_channel_dma_v0 {
+ __u8 version;
+ __u8 pad01[2];
+ __u8 head;
+ __u32 pushbuf;
+};
+
+/* overlay */
+struct nv50_disp_overlay_channel_dma_v0 {
+ __u8 version;
+ __u8 pad01[2];
+ __u8 head;
+ __u32 pushbuf;
+};
+
+/* overlay immediate */
+struct nv50_disp_overlay_v0 {
+ __u8 version;
+ __u8 head;
+ __u8 pad02[6];
+};
+
+
+/*******************************************************************************
+ * fermi
+ ******************************************************************************/
+
+#define FERMI_A_ZBC_COLOR 0x00
+#define FERMI_A_ZBC_DEPTH 0x01
+
+struct fermi_a_zbc_color_v0 {
+ __u8 version;
+#define FERMI_A_ZBC_COLOR_V0_FMT_ZERO 0x01
+#define FERMI_A_ZBC_COLOR_V0_FMT_UNORM_ONE 0x02
+#define FERMI_A_ZBC_COLOR_V0_FMT_RF32_GF32_BF32_AF32 0x04
+#define FERMI_A_ZBC_COLOR_V0_FMT_R16_G16_B16_A16 0x08
+#define FERMI_A_ZBC_COLOR_V0_FMT_RN16_GN16_BN16_AN16 0x0c
+#define FERMI_A_ZBC_COLOR_V0_FMT_RS16_GS16_BS16_AS16 0x10
+#define FERMI_A_ZBC_COLOR_V0_FMT_RU16_GU16_BU16_AU16 0x14
+#define FERMI_A_ZBC_COLOR_V0_FMT_RF16_GF16_BF16_AF16 0x16
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8R8G8B8 0x18
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8RL8GL8BL8 0x1c
+#define FERMI_A_ZBC_COLOR_V0_FMT_A2B10G10R10 0x20
+#define FERMI_A_ZBC_COLOR_V0_FMT_AU2BU10GU10RU10 0x24
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8B8G8R8 0x28
+#define FERMI_A_ZBC_COLOR_V0_FMT_A8BL8GL8RL8 0x2c
+#define FERMI_A_ZBC_COLOR_V0_FMT_AN8BN8GN8RN8 0x30
+#define FERMI_A_ZBC_COLOR_V0_FMT_AS8BS8GS8RS8 0x34
+#define FERMI_A_ZBC_COLOR_V0_FMT_AU8BU8GU8RU8 0x38
+#define FERMI_A_ZBC_COLOR_V0_FMT_A2R10G10B10 0x3c
+#define FERMI_A_ZBC_COLOR_V0_FMT_BF10GF11RF11 0x40
+ __u8 format;
+ __u8 index;
+ __u8 pad03[5];
+ __u32 ds[4];
+ __u32 l2[4];
+};
+
+struct fermi_a_zbc_depth_v0 {
+ __u8 version;
+#define FERMI_A_ZBC_DEPTH_V0_FMT_FP32 0x01
+ __u8 format;
+ __u8 index;
+ __u8 pad03[5];
+ __u32 ds;
+ __u32 l2;
+};
+
+#endif
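
The structures above share one ABI idiom: a leading version byte, explicit padding so the compiler inserts no hidden holes, and 64-bit members kept naturally aligned, which lets the same definitions be shared unchanged between both sides of the interface. A minimal userspace sketch of that idiom follows; the struct name and fields here are invented for illustration and are not defined by this patch.

/* Illustrative sketch of the "_v0" versioned-argument layout used above.
 * example_args_v0 is hypothetical; only the idiom matches the patch. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct example_args_v0 {
	uint8_t  version;	/* always first, so any version can be identified */
	uint8_t  flags;
	uint8_t  pad02[6];	/* explicit padding: no implicit holes, stable layout */
	uint64_t offset;	/* naturally aligned 64-bit member */
};

int main(void)
{
	struct example_args_v0 args = { .version = 0, .flags = 1, .offset = 0x1000 };

	/* Because padding is explicit, the size and field offsets are the same
	 * for every compiler/ABI that shares these headers. */
	printf("sizeof=%zu, offsetof(offset)=%zu\n",
	       sizeof(struct example_args_v0),
	       offsetof(struct example_args_v0, offset));
	return (int)args.version;
}
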
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
new file mode 100644
index 000000000000..3c4df1fc26dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "client.h"
+#include "driver.h"
+#include "ioctl.h"
+
+int
+nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
+{
+ return client->driver->ioctl(client->base.priv, client->super, data, size, NULL);
+}
+
+int
+nvif_client_suspend(struct nvif_client *client)
+{
+ return client->driver->suspend(client->base.priv);
+}
+
+int
+nvif_client_resume(struct nvif_client *client)
+{
+ return client->driver->resume(client->base.priv);
+}
+
+void
+nvif_client_fini(struct nvif_client *client)
+{
+ if (client->driver) {
+ client->driver->fini(client->base.priv);
+ client->driver = NULL;
+ client->base.parent = NULL;
+ nvif_object_fini(&client->base);
+ }
+}
+
+const struct nvif_driver *
+nvif_drivers[] = {
+#ifdef __KERNEL__
+ &nvif_driver_nvkm,
+#else
+ &nvif_driver_drm,
+ &nvif_driver_lib,
+#endif
+ NULL
+};
+
+int
+nvif_client_init(void (*dtor)(struct nvif_client *), const char *driver,
+ const char *name, u64 device, const char *cfg, const char *dbg,
+ struct nvif_client *client)
+{
+ int ret, i;
+
+ ret = nvif_object_init(NULL, (void*)dtor, 0, 0, NULL, 0, &client->base);
+ if (ret)
+ return ret;
+
+ client->base.parent = &client->base;
+ client->base.handle = ~0;
+ client->object = &client->base;
+ client->super = true;
+
+ for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
+ if (!driver || !strcmp(client->driver->name, driver)) {
+ ret = client->driver->init(name, device, cfg, dbg,
+ &client->base.priv);
+ if (!ret || driver)
+ break;
+ }
+ }
+
+ if (ret)
+ nvif_client_fini(client);
+ return ret;
+}
+
+static void
+nvif_client_del(struct nvif_client *client)
+{
+ nvif_client_fini(client);
+ kfree(client);
+}
+
+int
+nvif_client_new(const char *driver, const char *name, u64 device,
+ const char *cfg, const char *dbg,
+ struct nvif_client **pclient)
+{
+ struct nvif_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (client) {
+ int ret = nvif_client_init(nvif_client_del, driver, name,
+ device, cfg, dbg, client);
+ if (ret) {
+ kfree(client);
+ client = NULL;
+ }
+ *pclient = client;
+ return ret;
+ }
+ return -ENOMEM;
+}
+
+void
+nvif_client_ref(struct nvif_client *client, struct nvif_client **pclient)
+{
+ nvif_object_ref(&client->base, (struct nvif_object **)pclient);
+}
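
nvif_client_init() above selects its backend by walking the nvif_drivers[] table: if a driver name was requested it must match, otherwise the first backend whose init() succeeds wins. The standalone sketch below shows that selection loop with stand-in backends rather than the real nvif drivers.

/* Standalone sketch of the backend-selection loop in nvif_client_init():
 * try only the named driver if one was requested, otherwise fall back to
 * the first backend whose init() succeeds.  Backends here are stand-ins. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

struct backend {
	const char *name;
	int (*init)(void **priv);
};

static int init_a(void **priv) { (void)priv; return -ENODEV; } /* pretend absent */
static int init_b(void **priv) { static int dummy; *priv = &dummy; return 0; }

static const struct backend backends[] = {
	{ "a", init_a },
	{ "b", init_b },
	{ NULL, NULL },
};

static int
select_backend(const char *driver, const struct backend **sel, void **priv)
{
	int i, ret = -EINVAL;

	*sel = NULL;
	for (i = 0; backends[i].name; i++) {
		if (!driver || !strcmp(backends[i].name, driver)) {
			ret = backends[i].init(priv);
			/* stop on success, or after trying an explicitly named driver */
			if (!ret || driver) {
				*sel = &backends[i];
				break;
			}
		}
	}
	return ret;
}

int main(void)
{
	const struct backend *sel;
	void *priv = NULL;
	int ret = select_backend(NULL, &sel, &priv);

	printf("ret=%d driver=%s\n", ret, sel ? sel->name : "(none)");
	return ret ? 1 : 0;
}
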
diff --git a/drivers/gpu/drm/nouveau/nvif/client.h b/drivers/gpu/drm/nouveau/nvif/client.h
new file mode 100644
index 000000000000..28352f0882ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/client.h
@@ -0,0 +1,39 @@
+#ifndef __NVIF_CLIENT_H__
+#define __NVIF_CLIENT_H__
+
+#include "object.h"
+
+struct nvif_client {
+ struct nvif_object base;
+ struct nvif_object *object; /*XXX: hack for nvif_object() */
+ const struct nvif_driver *driver;
+ bool super;
+};
+
+static inline struct nvif_client *
+nvif_client(struct nvif_object *object)
+{
+ while (object && object->parent != object)
+ object = object->parent;
+ return (void *)object;
+}
+
+int nvif_client_init(void (*dtor)(struct nvif_client *), const char *,
+ const char *, u64, const char *, const char *,
+ struct nvif_client *);
+void nvif_client_fini(struct nvif_client *);
+int nvif_client_new(const char *, const char *, u64, const char *,
+ const char *, struct nvif_client **);
+void nvif_client_ref(struct nvif_client *, struct nvif_client **);
+int nvif_client_ioctl(struct nvif_client *, void *, u32);
+int nvif_client_suspend(struct nvif_client *);
+int nvif_client_resume(struct nvif_client *);
+
+/*XXX*/
+#include <core/client.h>
+#define nvkm_client(a) ({ \
+ struct nvif_client *_client = nvif_client(nvif_object(a)); \
+ nouveau_client(_client->base.priv); \
+})
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
new file mode 100644
index 000000000000..f477579725e3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "device.h"
+
+void
+nvif_device_fini(struct nvif_device *device)
+{
+ nvif_object_fini(&device->base);
+}
+
+int
+nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *),
+ u32 handle, u32 oclass, void *data, u32 size,
+ struct nvif_device *device)
+{
+ int ret = nvif_object_init(parent, (void *)dtor, handle, oclass,
+ data, size, &device->base);
+ if (ret == 0) {
+ device->object = &device->base;
+ device->info.version = 0;
+ ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO,
+ &device->info, sizeof(device->info));
+ }
+ return ret;
+}
+
+static void
+nvif_device_del(struct nvif_device *device)
+{
+ nvif_device_fini(device);
+ kfree(device);
+}
+
+int
+nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass,
+ void *data, u32 size, struct nvif_device **pdevice)
+{
+ struct nvif_device *device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (device) {
+ int ret = nvif_device_init(parent, nvif_device_del, handle,
+ oclass, data, size, device);
+ if (ret) {
+ kfree(device);
+ device = NULL;
+ }
+ *pdevice = device;
+ return ret;
+ }
+ return -ENOMEM;
+}
+
+void
+nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
+{
+ nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
+}
diff --git a/drivers/gpu/drm/nouveau/nvif/device.h b/drivers/gpu/drm/nouveau/nvif/device.h
new file mode 100644
index 000000000000..43180f9fe630
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/device.h
@@ -0,0 +1,62 @@
+#ifndef __NVIF_DEVICE_H__
+#define __NVIF_DEVICE_H__
+
+#include "object.h"
+#include "class.h"
+
+struct nvif_device {
+ struct nvif_object base;
+ struct nvif_object *object; /*XXX: hack for nvif_object() */
+ struct nv_device_info_v0 info;
+};
+
+static inline struct nvif_device *
+nvif_device(struct nvif_object *object)
+{
+ while (object && object->oclass != 0x0080 /*XXX: NV_DEVICE_CLASS*/ )
+ object = object->parent;
+ return (void *)object;
+}
+
+int nvif_device_init(struct nvif_object *, void (*dtor)(struct nvif_device *),
+ u32 handle, u32 oclass, void *, u32,
+ struct nvif_device *);
+void nvif_device_fini(struct nvif_device *);
+int nvif_device_new(struct nvif_object *, u32 handle, u32 oclass,
+ void *, u32, struct nvif_device **);
+void nvif_device_ref(struct nvif_device *, struct nvif_device **);
+
+/*XXX*/
+#include <subdev/bios.h>
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/gpio.h>
+#include <subdev/clock.h>
+#include <subdev/i2c.h>
+#include <subdev/timer.h>
+#include <subdev/therm.h>
+
+#define nvkm_device(a) nv_device(nvkm_object((a)))
+#define nvkm_bios(a) nouveau_bios(nvkm_device(a))
+#define nvkm_fb(a) nouveau_fb(nvkm_device(a))
+#define nvkm_vmmgr(a) nouveau_vmmgr(nvkm_device(a))
+#define nvkm_bar(a) nouveau_bar(nvkm_device(a))
+#define nvkm_gpio(a) nouveau_gpio(nvkm_device(a))
+#define nvkm_clock(a) nouveau_clock(nvkm_device(a))
+#define nvkm_i2c(a) nouveau_i2c(nvkm_device(a))
+#define nvkm_timer(a) nouveau_timer(nvkm_device(a))
+#define nvkm_wait(a,b,c,d) nv_wait(nvkm_timer(a), (b), (c), (d))
+#define nvkm_wait_cb(a,b,c) nv_wait_cb(nvkm_timer(a), (b), (c))
+#define nvkm_therm(a) nouveau_therm(nvkm_device(a))
+
+#include <engine/device.h>
+#include <engine/fifo.h>
+#include <engine/graph.h>
+#include <engine/software.h>
+
+#define nvkm_fifo(a) nouveau_fifo(nvkm_device(a))
+#define nvkm_fifo_chan(a) ((struct nouveau_fifo_chan *)nvkm_object(a))
+#define nvkm_gr(a) ((struct nouveau_graph *)nouveau_engine(nvkm_object(a), NVDEV_ENGINE_GR))
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h
new file mode 100644
index 000000000000..b72a8f0c2758
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/driver.h
@@ -0,0 +1,21 @@
+#ifndef __NVIF_DRIVER_H__
+#define __NVIF_DRIVER_H__
+
+struct nvif_driver {
+ const char *name;
+ int (*init)(const char *name, u64 device, const char *cfg,
+ const char *dbg, void **priv);
+ void (*fini)(void *priv);
+ int (*suspend)(void *priv);
+ int (*resume)(void *priv);
+ int (*ioctl)(void *priv, bool super, void *data, u32 size, void **hack);
+ void *(*map)(void *priv, u64 handle, u32 size);
+ void (*unmap)(void *priv, void *ptr, u32 size);
+ bool keep;
+};
+
+extern const struct nvif_driver nvif_driver_nvkm;
+extern const struct nvif_driver nvif_driver_drm;
+extern const struct nvif_driver nvif_driver_lib;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/event.h b/drivers/gpu/drm/nouveau/nvif/event.h
new file mode 100644
index 000000000000..21764499b4be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/event.h
@@ -0,0 +1,62 @@
+#ifndef __NVIF_EVENT_H__
+#define __NVIF_EVENT_H__
+
+struct nvif_notify_req_v0 {
+ __u8 version;
+ __u8 reply;
+ __u8 pad02[5];
+#define NVIF_NOTIFY_V0_ROUTE_NVIF 0x00
+ __u8 route;
+ __u64 token; /* must be unique */
+ __u8 data[]; /* request data (below) */
+};
+
+struct nvif_notify_rep_v0 {
+ __u8 version;
+ __u8 pad01[6];
+ __u8 route;
+ __u64 token;
+ __u8 data[]; /* reply data (below) */
+};
+
+struct nvif_notify_head_req_v0 {
+ /* nvif_notify_req ... */
+ __u8 version;
+ __u8 head;
+ __u8 pad02[6];
+};
+
+struct nvif_notify_head_rep_v0 {
+ /* nvif_notify_rep ... */
+ __u8 version;
+ __u8 pad01[7];
+};
+
+struct nvif_notify_conn_req_v0 {
+ /* nvif_notify_req ... */
+ __u8 version;
+#define NVIF_NOTIFY_CONN_V0_PLUG 0x01
+#define NVIF_NOTIFY_CONN_V0_UNPLUG 0x02
+#define NVIF_NOTIFY_CONN_V0_IRQ 0x04
+#define NVIF_NOTIFY_CONN_V0_ANY 0x07
+ __u8 mask;
+ __u8 conn;
+ __u8 pad03[5];
+};
+
+struct nvif_notify_conn_rep_v0 {
+ /* nvif_notify_rep ... */
+ __u8 version;
+ __u8 mask;
+ __u8 pad02[6];
+};
+
+struct nvif_notify_uevent_req {
+ /* nvif_notify_req ... */
+};
+
+struct nvif_notify_uevent_rep {
+ /* nvif_notify_rep ... */
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/ioctl.h b/drivers/gpu/drm/nouveau/nvif/ioctl.h
new file mode 100644
index 000000000000..4cd8e323b23d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/ioctl.h
@@ -0,0 +1,128 @@
+#ifndef __NVIF_IOCTL_H__
+#define __NVIF_IOCTL_H__
+
+struct nvif_ioctl_v0 {
+ __u8 version;
+#define NVIF_IOCTL_V0_OWNER_NVIF 0x00
+#define NVIF_IOCTL_V0_OWNER_ANY 0xff
+ __u8 owner;
+#define NVIF_IOCTL_V0_NOP 0x00
+#define NVIF_IOCTL_V0_SCLASS 0x01
+#define NVIF_IOCTL_V0_NEW 0x02
+#define NVIF_IOCTL_V0_DEL 0x03
+#define NVIF_IOCTL_V0_MTHD 0x04
+#define NVIF_IOCTL_V0_RD 0x05
+#define NVIF_IOCTL_V0_WR 0x06
+#define NVIF_IOCTL_V0_MAP 0x07
+#define NVIF_IOCTL_V0_UNMAP 0x08
+#define NVIF_IOCTL_V0_NTFY_NEW 0x09
+#define NVIF_IOCTL_V0_NTFY_DEL 0x0a
+#define NVIF_IOCTL_V0_NTFY_GET 0x0b
+#define NVIF_IOCTL_V0_NTFY_PUT 0x0c
+ __u8 type;
+ __u8 path_nr;
+#define NVIF_IOCTL_V0_ROUTE_NVIF 0x00
+#define NVIF_IOCTL_V0_ROUTE_HIDDEN 0xff
+ __u8 pad04[3];
+ __u8 route;
+ __u64 token;
+ __u32 path[8]; /* in reverse */
+ __u8 data[]; /* ioctl data (below) */
+};
+
+struct nvif_ioctl_nop {
+};
+
+struct nvif_ioctl_sclass_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 count;
+ __u8 pad02[6];
+ __u32 oclass[];
+};
+
+struct nvif_ioctl_new_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 pad01[6];
+ __u8 route;
+ __u64 token;
+ __u32 handle;
+/* these class numbers are made up by us, and not nvidia-assigned */
+#define NVIF_IOCTL_NEW_V0_PERFCTR 0x0000ffff
+#define NVIF_IOCTL_NEW_V0_CONTROL 0x0000fffe
+ __u32 oclass;
+ __u8 data[]; /* class data (class.h) */
+};
+
+struct nvif_ioctl_del {
+};
+
+struct nvif_ioctl_rd_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 size;
+ __u8 pad02[2];
+ __u32 data;
+ __u64 addr;
+};
+
+struct nvif_ioctl_wr_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 size;
+ __u8 pad02[2];
+ __u32 data;
+ __u64 addr;
+};
+
+struct nvif_ioctl_map_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 pad01[3];
+ __u32 length;
+ __u64 handle;
+};
+
+struct nvif_ioctl_unmap {
+};
+
+struct nvif_ioctl_ntfy_new_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 event;
+ __u8 index;
+ __u8 pad03[5];
+ __u8 data[]; /* event request data (event.h) */
+};
+
+struct nvif_ioctl_ntfy_del_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_ntfy_get_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_ntfy_put_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 index;
+ __u8 pad02[6];
+};
+
+struct nvif_ioctl_mthd_v0 {
+ /* nvif_ioctl ... */
+ __u8 version;
+ __u8 method;
+ __u8 pad02[6];
+ __u8 data[]; /* method data (class.h) */
+};
+
+#endif
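
Every request built on these definitions is a fixed nvif_ioctl_v0 header, followed by a type-specific struct, followed by optional trailing data. The compilable sketch below illustrates that packing with simplified local mirrors of the structures (the kernel headers express the trailing payload with a flexible data[] member instead), and handle_request() is a stand-in for the driver side.

/* Sketch of the "fixed header + per-type struct + trailing payload" packing
 * used for nvif requests. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hdr_v0  { uint8_t version, owner, type, path_nr; uint8_t pad04[4]; };
struct mthd_v0 { uint8_t version, method; uint8_t pad02[6]; };

static void handle_request(const void *buf, size_t size)
{
	const struct hdr_v0 *hdr = buf;
	printf("type=0x%02x, %zu bytes total\n", hdr->type, size);
}

int main(void)
{
	const char payload[] = "method-specific data";
	struct {
		struct hdr_v0  ioctl;
		struct mthd_v0 mthd;
	} *args;
	size_t size = sizeof(payload);

	/* one allocation covers the header, the method struct and the payload */
	args = malloc(sizeof(*args) + size);
	if (!args)
		return 1;
	memset(args, 0, sizeof(*args));
	args->ioctl.type = 0x04;		/* NVIF_IOCTL_V0_MTHD in the real header */
	args->mthd.method = 0x00;
	memcpy(args + 1, payload, size);	/* data lands right after the structs */

	handle_request(args, sizeof(*args) + size);
	free(args);
	return 0;
}
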
diff --git a/drivers/gpu/drm/nouveau/nvif/list.h b/drivers/gpu/drm/nouveau/nvif/list.h
new file mode 100644
index 000000000000..8af5d144ecb0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/list.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ * Copyright © 2010 Francisco Jerez <currojerez@riseup.net>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+/* Modified by Ben Skeggs <bskeggs@redhat.com> to match kernel list APIs */
+
+#ifndef _XORG_LIST_H_
+#define _XORG_LIST_H_
+
+/**
+ * @file Classic doubly-linked circular list implementation.
+ * For real usage examples of the linked list, see the file test/list.c
+ *
+ * Example:
+ * We need to keep a list of struct foo in the parent struct bar, i.e. what
+ * we want is something like this.
+ *
+ * struct bar {
+ * ...
+ * struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{}
+ * ...
+ * }
+ *
+ * We need one list head in bar and a list element in all list_of_foos (both are of
+ * data type 'struct list_head').
+ *
+ * struct bar {
+ * ...
+ * struct list_head list_of_foos;
+ * ...
+ * }
+ *
+ * struct foo {
+ * ...
+ * struct list_head entry;
+ * ...
+ * }
+ *
+ * Now we initialize the list head:
+ *
+ * struct bar bar;
+ * ...
+ * INIT_LIST_HEAD(&bar.list_of_foos);
+ *
+ * Then we create the first element and add it to this list:
+ *
+ * struct foo *foo = malloc(...);
+ * ....
+ * list_add(&foo->entry, &bar.list_of_foos);
+ *
+ * Repeat the above for each element you want to add to the list. Deleting
+ * works with the element itself.
+ * list_del(&foo->entry);
+ * free(foo);
+ *
+ * Note: calling list_del() on the list head itself only unlinks the head from
+ * its elements; use list_del_init() (or INIT_LIST_HEAD()) to reset the head to
+ * an empty list again.
+ *
+ * Looping through the list requires a 'struct foo' as iterator and the
+ * name of the field the subnodes use.
+ *
+ * struct foo *iterator;
+ * list_for_each_entry(iterator, &bar.list_of_foos, entry) {
+ * if (iterator->something == ...)
+ * ...
+ * }
+ *
+ * Note: You must not call list_del() on the iterator if you continue the
+ * loop. You need to run the safe for-each loop instead:
+ *
+ * struct foo *iterator, *next;
+ * list_for_each_entry_safe(iterator, next, &bar.list_of_foos, entry) {
+ * if (...)
+ * list_del(&iterator->entry);
+ * }
+ *
+ */
+
+/**
+ * The linkage struct for list nodes. This struct must be part of your
+ * to-be-linked struct. struct list_head is required for both the head of the
+ * list and for each list node.
+ *
+ * The position and name of the struct list_head field are irrelevant.
+ * There is no requirement that the elements of a list share the same type.
+ * There are no special requirements for a list head; any struct list_head can
+ * serve as one.
+ */
+struct list_head {
+ struct list_head *next, *prev;
+};
+
+/**
+ * Initialize the list as an empty list.
+ *
+ * Example:
+ * INIT_LIST_HEAD(&bar->list_of_foos);
+ *
+ * @param list The list to initialize.
+ */
+#define LIST_HEAD_INIT(name) { &(name), &(name) }
+
+#define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+
+static inline void
+INIT_LIST_HEAD(struct list_head *list)
+{
+ list->next = list->prev = list;
+}
+
+static inline void
+__list_add(struct list_head *entry,
+ struct list_head *prev, struct list_head *next)
+{
+ next->prev = entry;
+ entry->next = next;
+ entry->prev = prev;
+ prev->next = entry;
+}
+
+/**
+ * Insert a new element after the given list head. The new element does not
+ * need to be initialised as an empty list.
+ * The list changes from:
+ * head → some element → ...
+ * to
+ * head → new element → older element → ...
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * list_add(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to prepend to the list.
+ * @param head The existing list.
+ */
+static inline void
+list_add(struct list_head *entry, struct list_head *head)
+{
+ __list_add(entry, head, head->next);
+}
+
+/**
+ * Append a new element to the end of the list given with this list head.
+ *
+ * The list changes from:
+ * head → some element → ... → lastelement
+ * to
+ * head → some element → ... → lastelement → new element
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * list_add_tail(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to append to the list.
+ * @param head The existing list.
+ */
+static inline void
+list_add_tail(struct list_head *entry, struct list_head *head)
+{
+ __list_add(entry, head->prev, head);
+}
+
+static inline void
+__list_del(struct list_head *prev, struct list_head *next)
+{
+ next->prev = prev;
+ prev->next = next;
+}
+
+/**
+ * Remove the element from the list it is in.  The neighbouring elements are
+ * relinked around it; the element's own next/prev pointers are left untouched
+ * (use list_del_init() if they must be reset).  It does NOT free the element
+ * itself or manipulate it otherwise.
+ *
+ * Calling list_del() on a pure list head (like in the example at the top of
+ * this file) will NOT remove the first element; it only unlinks the head from
+ * its elements.  Use list_del_init() to reset a head to an empty list.
+ *
+ * Example:
+ * list_del(&foo->entry);
+ *
+ * @param entry The element to remove.
+ */
+static inline void
+list_del(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+}
+
+static inline void
+list_del_init(struct list_head *entry)
+{
+ __list_del(entry->prev, entry->next);
+ INIT_LIST_HEAD(entry);
+}
+
+static inline void list_move_tail(struct list_head *list,
+ struct list_head *head)
+{
+ __list_del(list->prev, list->next);
+ list_add_tail(list, head);
+}
+
+/**
+ * Check if the list is empty.
+ *
+ * Example:
+ * list_empty(&bar->list_of_foos);
+ *
+ * @return True if the list is empty, False otherwise.
+ */
+static inline bool
+list_empty(struct list_head *head)
+{
+ return head->next == head;
+}
+
+/**
+ * Returns a pointer to the container of this list element.
+ *
+ * Example:
+ * struct foo* f;
+ * f = container_of(&foo->entry, struct foo, entry);
+ * assert(f == foo);
+ *
+ * @param ptr Pointer to the struct list_head.
+ * @param type Data type of the list element.
+ * @param member Member name of the struct list_head field in the list element.
+ * @return A pointer to the data struct containing the list head.
+ */
+#ifndef container_of
+#define container_of(ptr, type, member) \
+ (type *)((char *)(ptr) - (char *) &((type *)0)->member)
+#endif
+
+/**
+ * Alias of container_of
+ */
+#define list_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+/**
+ * Retrieve the first list entry for the given list pointer.
+ *
+ * Example:
+ * struct foo *first;
+ * first = list_first_entry(&bar->list_of_foos, struct foo, entry);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the struct list_head field in the list element.
+ * @return A pointer to the first list element.
+ */
+#define list_first_entry(ptr, type, member) \
+ list_entry((ptr)->next, type, member)
+
+/**
+ * Retrieve the last list entry for the given list pointer.
+ *
+ * Example:
+ * struct foo *last;
+ * last = list_last_entry(&bar->list_of_foos, struct foo, entry);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the struct list_head field in the list element.
+ * @return A pointer to the last list element.
+ */
+#define list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+#define __container_of(ptr, sample, member) \
+ (void *)container_of((ptr), typeof(*(sample)), member)
+
+/**
+ * Loop through the list given by head, setting pos to each struct in the list
+ * in turn.
+ *
+ * Example:
+ * struct foo *iterator;
+ * list_for_each_entry(iterator, &bar->list_of_foos, entry) {
+ * [modify iterator]
+ * }
+ *
+ * This macro is not safe for node deletion. Use list_for_each_entry_safe
+ * instead.
+ *
+ * @param pos Iterator variable of the type of the list elements.
+ * @param head List head
+ * @param member Member name of the struct list_head in the list elements.
+ *
+ */
+#define list_for_each_entry(pos, head, member) \
+ for (pos = __container_of((head)->next, pos, member); \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.next, pos, member))
+
+/**
+ * Loop through the list, keeping a backup pointer to the element. This
+ * macro allows for the deletion of a list element while looping through the
+ * list.
+ *
+ * See list_for_each_entry for more details.
+ */
+#define list_for_each_entry_safe(pos, tmp, head, member) \
+ for (pos = __container_of((head)->next, pos, member), \
+ tmp = __container_of(pos->member.next, pos, member); \
+ &pos->member != (head); \
+ pos = tmp, tmp = __container_of(pos->member.next, tmp, member))
+
+
+#define list_for_each_entry_reverse(pos, head, member) \
+ for (pos = __container_of((head)->prev, pos, member); \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.prev, pos, member))
+
+#define list_for_each_entry_continue(pos, head, member) \
+ for (pos = __container_of(pos->member.next, pos, member); \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.next, pos, member))
+
+#define list_for_each_entry_continue_reverse(pos, head, member) \
+ for (pos = __container_of(pos->member.prev, pos, member); \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.prev, pos, member))
+
+#define list_for_each_entry_from(pos, head, member) \
+ for (; \
+ &pos->member != (head); \
+ pos = __container_of(pos->member.next, pos, member))
+
+#endif
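
The usage documented in the comments of this header can be exercised directly from userspace, since the file has no other kernel dependencies. Below is a small demo, assuming the header above is saved as list.h next to it; stdbool.h is included first because the header uses bool without providing it.

/* Userspace exercise of the intrusive list API above. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include "list.h"

struct foo {
	int value;
	struct list_head entry;		/* the linkage lives inside the element */
};

int main(void)
{
	LIST_HEAD(foos);
	struct foo *iter, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		struct foo *f = malloc(sizeof(*f));
		if (!f)
			return 1;
		f->value = i;
		list_add_tail(&f->entry, &foos);	/* append, preserving order */
	}

	list_for_each_entry(iter, &foos, entry)
		printf("%d\n", iter->value);

	/* the _safe variant keeps a lookahead pointer, so deletion is allowed */
	list_for_each_entry_safe(iter, tmp, &foos, entry) {
		list_del(&iter->entry);
		free(iter);
	}
	return list_empty(&foos) ? 0 : 1;
}
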
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
new file mode 100644
index 000000000000..7c06123a559c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include <nvif/client.h>
+#include <nvif/driver.h>
+#include <nvif/notify.h>
+#include <nvif/object.h>
+#include <nvif/ioctl.h>
+#include <nvif/event.h>
+
+static inline int
+nvif_notify_put_(struct nvif_notify *notify)
+{
+ struct nvif_object *object = notify->object;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_ntfy_put_v0 ntfy;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_NTFY_PUT,
+ .ntfy.index = notify->index,
+ };
+
+ if (atomic_inc_return(&notify->putcnt) != 1)
+ return 0;
+
+ return nvif_object_ioctl(object, &args, sizeof(args), NULL);
+}
+
+int
+nvif_notify_put(struct nvif_notify *notify)
+{
+ if (likely(notify->object) &&
+ test_and_clear_bit(NVIF_NOTIFY_USER, &notify->flags)) {
+ int ret = nvif_notify_put_(notify);
+ if (test_bit(NVIF_NOTIFY_WORK, &notify->flags))
+ flush_work(&notify->work);
+ return ret;
+ }
+ return 0;
+}
+
+static inline int
+nvif_notify_get_(struct nvif_notify *notify)
+{
+ struct nvif_object *object = notify->object;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_ntfy_get_v0 ntfy;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_NTFY_GET,
+ .ntfy.index = notify->index,
+ };
+
+ if (atomic_dec_return(&notify->putcnt) != 0)
+ return 0;
+
+ return nvif_object_ioctl(object, &args, sizeof(args), NULL);
+}
+
+int
+nvif_notify_get(struct nvif_notify *notify)
+{
+ if (likely(notify->object) &&
+ !test_and_set_bit(NVIF_NOTIFY_USER, &notify->flags))
+ return nvif_notify_get_(notify);
+ return 0;
+}
+
+static void
+nvif_notify_work(struct work_struct *work)
+{
+ struct nvif_notify *notify = container_of(work, typeof(*notify), work);
+ if (notify->func(notify) == NVIF_NOTIFY_KEEP)
+ nvif_notify_get_(notify);
+}
+
+int
+nvif_notify(const void *header, u32 length, const void *data, u32 size)
+{
+ struct nvif_notify *notify = NULL;
+ const union {
+ struct nvif_notify_rep_v0 v0;
+ } *args = header;
+ int ret = NVIF_NOTIFY_DROP;
+
+ if (length == sizeof(args->v0) && args->v0.version == 0) {
+ if (WARN_ON(args->v0.route))
+ return NVIF_NOTIFY_DROP;
+ notify = (void *)(unsigned long)args->v0.token;
+ }
+
+ if (!WARN_ON(notify == NULL)) {
+ struct nvif_client *client = nvif_client(notify->object);
+ if (!WARN_ON(notify->size != size)) {
+ if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
+ atomic_inc(&notify->putcnt);
+ memcpy((void *)notify->data, data, size);
+ schedule_work(&notify->work);
+ return NVIF_NOTIFY_DROP;
+ }
+ notify->data = data;
+ ret = notify->func(notify);
+ notify->data = NULL;
+ if (ret != NVIF_NOTIFY_DROP && client->driver->keep) {
+ atomic_inc(&notify->putcnt);
+ nvif_notify_get_(notify);
+ }
+ }
+ }
+
+ return ret;
+}
+
+int
+nvif_notify_fini(struct nvif_notify *notify)
+{
+ struct nvif_object *object = notify->object;
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_ntfy_del_v0 ntfy;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_NTFY_DEL,
+ .ntfy.index = notify->index,
+ };
+ int ret = nvif_notify_put(notify);
+ if (ret >= 0 && object) {
+ ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (ret == 0) {
+ nvif_object_ref(NULL, &notify->object);
+ kfree((void *)notify->data);
+ }
+ }
+ return ret;
+}
+
+int
+nvif_notify_init(struct nvif_object *object, void (*dtor)(struct nvif_notify *),
+ int (*func)(struct nvif_notify *), bool work, u8 event,
+ void *data, u32 size, u32 reply, struct nvif_notify *notify)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_ntfy_new_v0 ntfy;
+ struct nvif_notify_req_v0 req;
+ } *args;
+ int ret = -ENOMEM;
+
+ notify->object = NULL;
+ nvif_object_ref(object, &notify->object);
+ notify->flags = 0;
+ atomic_set(&notify->putcnt, 1);
+ notify->dtor = dtor;
+ notify->func = func;
+ notify->data = NULL;
+ notify->size = reply;
+ if (work) {
+ INIT_WORK(&notify->work, nvif_notify_work);
+ set_bit(NVIF_NOTIFY_WORK, &notify->flags);
+ notify->data = kmalloc(notify->size, GFP_KERNEL);
+ if (!notify->data)
+ goto done;
+ }
+
+ if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+ goto done;
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_NTFY_NEW;
+ args->ntfy.version = 0;
+ args->ntfy.event = event;
+ args->req.version = 0;
+ args->req.reply = notify->size;
+ args->req.route = 0;
+ args->req.token = (unsigned long)(void *)notify;
+
+ memcpy(args->req.data, data, size);
+ ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+ notify->index = args->ntfy.index;
+ kfree(args);
+done:
+ if (ret)
+ nvif_notify_fini(notify);
+ return ret;
+}
+
+static void
+nvif_notify_del(struct nvif_notify *notify)
+{
+ nvif_notify_fini(notify);
+ kfree(notify);
+}
+
+void
+nvif_notify_ref(struct nvif_notify *notify, struct nvif_notify **pnotify)
+{
+ BUG_ON(notify != NULL);
+ if (*pnotify)
+ (*pnotify)->dtor(*pnotify);
+ *pnotify = notify;
+}
+
+int
+nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *),
+ bool work, u8 type, void *data, u32 size, u32 reply,
+ struct nvif_notify **pnotify)
+{
+ struct nvif_notify *notify = kzalloc(sizeof(*notify), GFP_KERNEL);
+ if (notify) {
+ int ret = nvif_notify_init(object, nvif_notify_del, func, work,
+ type, data, size, reply, notify);
+ if (ret)
+ kfree(notify);
+ *pnotify = notify;
+ return ret;
+ }
+ return -ENOMEM;
+}
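
nvif_notify_init() above hands the other side a 64-bit token that is really the notify pointer, and nvif_notify() recovers it from the reply by casting back, so no lookup table is needed. A standalone sketch of that token round-trip follows; the structs and deliver() are invented for the example.

/* Sketch of the pointer-in-a-token round-trip used by nvif notifications. */
#include <stdint.h>
#include <stdio.h>

struct reply  { uint64_t token; int code; };
struct waiter { const char *name; };

/* the "event source": only sees the opaque token it was handed earlier */
static void deliver(uint64_t token, int code)
{
	struct reply rep = { .token = token, .code = code };
	struct waiter *w = (void *)(uintptr_t)rep.token;	/* recover the pointer */

	printf("notify for %s: code=%d\n", w->name, rep.code);
}

int main(void)
{
	struct waiter w = { "vblank-waiter" };

	/* registration side: pack our pointer into the wire-sized token */
	uint64_t token = (uintptr_t)&w;

	deliver(token, 42);
	return 0;
}
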
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.h b/drivers/gpu/drm/nouveau/nvif/notify.h
new file mode 100644
index 000000000000..9ebfa3b45e76
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/notify.h
@@ -0,0 +1,39 @@
+#ifndef __NVIF_NOTIFY_H__
+#define __NVIF_NOTIFY_H__
+
+struct nvif_notify {
+ struct nvif_object *object;
+ int index;
+
+#define NVIF_NOTIFY_USER 0
+#define NVIF_NOTIFY_WORK 1
+ unsigned long flags;
+ atomic_t putcnt;
+ void (*dtor)(struct nvif_notify *);
+#define NVIF_NOTIFY_DROP 0
+#define NVIF_NOTIFY_KEEP 1
+ int (*func)(struct nvif_notify *);
+
+ /* this is const for a *very* good reason - the data might be on the
+ * stack from an irq handler. if you're not nvif/notify.c then you
+ * should probably think twice before casting it away...
+ */
+ const void *data;
+ u32 size;
+ struct work_struct work;
+};
+
+int nvif_notify_init(struct nvif_object *, void (*dtor)(struct nvif_notify *),
+ int (*func)(struct nvif_notify *), bool work, u8 type,
+ void *data, u32 size, u32 reply, struct nvif_notify *);
+int nvif_notify_fini(struct nvif_notify *);
+int nvif_notify_get(struct nvif_notify *);
+int nvif_notify_put(struct nvif_notify *);
+int nvif_notify(const void *, u32, const void *, u32);
+
+int nvif_notify_new(struct nvif_object *, int (*func)(struct nvif_notify *),
+ bool work, u8 type, void *data, u32 size, u32 reply,
+ struct nvif_notify **);
+void nvif_notify_ref(struct nvif_notify *, struct nvif_notify **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
new file mode 100644
index 000000000000..b0c82206ece2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "object.h"
+#include "client.h"
+#include "driver.h"
+#include "ioctl.h"
+
+int
+nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
+{
+ struct nvif_client *client = nvif_client(object);
+ union {
+ struct nvif_ioctl_v0 v0;
+ } *args = data;
+
+ if (size >= sizeof(*args) && args->v0.version == 0) {
+ args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
+ args->v0.path_nr = 0;
+ while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) {
+ args->v0.path[args->v0.path_nr++] = object->handle;
+ if (object->parent == object)
+ break;
+ object = object->parent;
+ }
+ } else
+ return -ENOSYS;
+
+ return client->driver->ioctl(client->base.priv, client->super, data, size, hack);
+}
+
+int
+nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_sclass_v0 sclass;
+ } *args;
+ u32 size = count * sizeof(args->sclass.oclass[0]);
+ int ret;
+
+ if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+ return -ENOMEM;
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
+ args->sclass.version = 0;
+ args->sclass.count = count;
+
+ memcpy(args->sclass.oclass, oclass, size);
+ ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+ ret = ret ? ret : args->sclass.count;
+ memcpy(oclass, args->sclass.oclass, size);
+ kfree(args);
+ return ret;
+}
+
+u32
+nvif_object_rd(struct nvif_object *object, int size, u64 addr)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_rd_v0 rd;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_RD,
+ .rd.size = size,
+ .rd.addr = addr,
+ };
+ int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (ret) {
+ /*XXX: warn? */
+ return 0;
+ }
+ return args.rd.data;
+}
+
+void
+nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_wr_v0 wr;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_WR,
+ .wr.size = size,
+ .wr.addr = addr,
+ .wr.data = data,
+ };
+ int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (ret) {
+ /*XXX: warn? */
+ }
+}
+
+int
+nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
+{
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_mthd_v0 mthd;
+ } *args;
+ u8 stack[128];
+ int ret;
+
+ if (sizeof(*args) + size > sizeof(stack)) {
+ if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+ return -ENOMEM;
+ } else {
+ args = (void *)stack;
+ }
+ args->ioctl.version = 0;
+ args->ioctl.type = NVIF_IOCTL_V0_MTHD;
+ args->mthd.version = 0;
+ args->mthd.method = mthd;
+
+ memcpy(args->mthd.data, data, size);
+ ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+ memcpy(data, args->mthd.data, size);
+ if (args != (void *)stack)
+ kfree(args);
+ return ret;
+}
+
+void
+nvif_object_unmap(struct nvif_object *object)
+{
+ if (object->map.size) {
+ struct nvif_client *client = nvif_client(object);
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_unmap unmap;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_UNMAP,
+ };
+
+ if (object->map.ptr) {
+ client->driver->unmap(client, object->map.ptr,
+ object->map.size);
+ object->map.ptr = NULL;
+ }
+
+ nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ object->map.size = 0;
+ }
+}
+
+int
+nvif_object_map(struct nvif_object *object)
+{
+ struct nvif_client *client = nvif_client(object);
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_map_v0 map;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_MAP,
+ };
+ int ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (ret == 0) {
+ object->map.size = args.map.length;
+ object->map.ptr = client->driver->map(client, args.map.handle,
+ object->map.size);
+ if (ret = -ENOMEM, object->map.ptr)
+ return 0;
+ nvif_object_unmap(object);
+ }
+ return ret;
+}
+
+struct ctor {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_new_v0 new;
+};
+
+void
+nvif_object_fini(struct nvif_object *object)
+{
+ struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data);
+ if (object->parent) {
+ struct {
+ struct nvif_ioctl_v0 ioctl;
+ struct nvif_ioctl_del del;
+ } args = {
+ .ioctl.type = NVIF_IOCTL_V0_DEL,
+ };
+
+ nvif_object_unmap(object);
+ nvif_object_ioctl(object, &args, sizeof(args), NULL);
+ if (object->data) {
+ object->size = 0;
+ object->data = NULL;
+ kfree(ctor);
+ }
+ nvif_object_ref(NULL, &object->parent);
+ }
+}
+
+int
+nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *),
+ u32 handle, u32 oclass, void *data, u32 size,
+ struct nvif_object *object)
+{
+ struct ctor *ctor;
+ int ret = 0;
+
+ object->parent = NULL;
+ object->object = object;
+ nvif_object_ref(parent, &object->parent);
+ kref_init(&object->refcount);
+ object->handle = handle;
+ object->oclass = oclass;
+ object->data = NULL;
+ object->size = 0;
+ object->dtor = dtor;
+ object->map.ptr = NULL;
+ object->map.size = 0;
+
+ if (object->parent) {
+ if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) {
+ nvif_object_fini(object);
+ return -ENOMEM;
+ }
+ object->data = ctor->new.data;
+ object->size = size;
+ memcpy(object->data, data, size);
+
+ ctor->ioctl.version = 0;
+ ctor->ioctl.type = NVIF_IOCTL_V0_NEW;
+ ctor->new.version = 0;
+ ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
+ ctor->new.token = (unsigned long)(void *)object;
+ ctor->new.handle = handle;
+ ctor->new.oclass = oclass;
+
+ ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) +
+ object->size, &object->priv);
+ }
+
+ if (ret)
+ nvif_object_fini(object);
+ return ret;
+}
+
+static void
+nvif_object_del(struct nvif_object *object)
+{
+ nvif_object_fini(object);
+ kfree(object);
+}
+
+int
+nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
+ void *data, u32 size, struct nvif_object **pobject)
+{
+ struct nvif_object *object = kzalloc(sizeof(*object), GFP_KERNEL);
+ if (object) {
+ int ret = nvif_object_init(parent, nvif_object_del, handle,
+ oclass, data, size, object);
+ if (ret)
+ kfree(object);
+ *pobject = object;
+ return ret;
+ }
+ return -ENOMEM;
+}
+
+static void
+nvif_object_put(struct kref *kref)
+{
+ struct nvif_object *object =
+ container_of(kref, typeof(*object), refcount);
+ object->dtor(object);
+}
+
+void
+nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject)
+{
+ if (object)
+ kref_get(&object->refcount);
+ if (*pobject)
+ kref_put(&(*pobject)->refcount, nvif_object_put);
+ *pobject = object;
+}
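A minimal lifecycle sketch for the constructors above (hypothetical caller code, not taken from the patch; "parent", "handle", "oclass", "data" and "size" are assumed to be in scope):

	struct nvif_object *obj = NULL;
	int ret;

	ret = nvif_object_new(parent, handle, oclass, data, size, &obj);
	if (ret == 0) {
		/* refcount starts at 1; use the object, e.g. via nvif_object_mthd() */
		nvif_object_ref(NULL, &obj);	/* drop the ref; dtor (nvif_object_del) runs fini + kfree */
	}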
diff --git a/drivers/gpu/drm/nouveau/nvif/object.h b/drivers/gpu/drm/nouveau/nvif/object.h
new file mode 100644
index 000000000000..fac3a3bbec44
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/object.h
@@ -0,0 +1,75 @@
+#ifndef __NVIF_OBJECT_H__
+#define __NVIF_OBJECT_H__
+
+#include <nvif/os.h>
+
+struct nvif_object {
+ struct nvif_object *parent;
+ struct nvif_object *object; /*XXX: hack for nvif_object() */
+ struct kref refcount;
+ u32 handle;
+ u32 oclass;
+ void *data;
+ u32 size;
+ void *priv; /*XXX: hack */
+ void (*dtor)(struct nvif_object *);
+ struct {
+ void *ptr;
+ u32 size;
+ } map;
+};
+
+int nvif_object_init(struct nvif_object *, void (*dtor)(struct nvif_object *),
+ u32 handle, u32 oclass, void *, u32,
+ struct nvif_object *);
+void nvif_object_fini(struct nvif_object *);
+int nvif_object_new(struct nvif_object *, u32 handle, u32 oclass,
+ void *, u32, struct nvif_object **);
+void nvif_object_ref(struct nvif_object *, struct nvif_object **);
+int nvif_object_ioctl(struct nvif_object *, void *, u32, void **);
+int nvif_object_sclass(struct nvif_object *, u32 *, int);
+u32 nvif_object_rd(struct nvif_object *, int, u64);
+void nvif_object_wr(struct nvif_object *, int, u64, u32);
+int nvif_object_mthd(struct nvif_object *, u32, void *, u32);
+int nvif_object_map(struct nvif_object *);
+void nvif_object_unmap(struct nvif_object *);
+
+#define nvif_object(a) (a)->object
+
+#define ioread8_native ioread8
+#define iowrite8_native iowrite8
+#define nvif_rd(a,b,c) ({ \
+ struct nvif_object *_object = nvif_object(a); \
+ u32 _data; \
+ if (likely(_object->map.ptr)) \
+ _data = ioread##b##_native((u8 *)_object->map.ptr + (c)); \
+ else \
+ _data = nvif_object_rd(_object, (b) / 8, (c)); \
+ _data; \
+})
+#define nvif_wr(a,b,c,d) ({ \
+ struct nvif_object *_object = nvif_object(a); \
+ if (likely(_object->map.ptr)) \
+ iowrite##b##_native((d), (u8 *)_object->map.ptr + (c)); \
+ else \
+ nvif_object_wr(_object, (b) / 8, (c), (d)); \
+})
+#define nvif_rd08(a,b) ({ u8 _v = nvif_rd((a), 8, (b)); _v; })
+#define nvif_rd16(a,b) ({ u16 _v = nvif_rd((a), 16, (b)); _v; })
+#define nvif_rd32(a,b) ({ u32 _v = nvif_rd((a), 32, (b)); _v; })
+#define nvif_wr08(a,b,c) nvif_wr((a), 8, (b), (u8)(c))
+#define nvif_wr16(a,b,c) nvif_wr((a), 16, (b), (u16)(c))
+#define nvif_wr32(a,b,c) nvif_wr((a), 32, (b), (u32)(c))
+#define nvif_mask(a,b,c,d) ({ \
+ u32 _v = nvif_rd32(nvif_object(a), (b)); \
+ nvif_wr32(nvif_object(a), (b), (_v & ~(c)) | (d)); \
+ _v; \
+})
+
+#define nvif_mthd(a,b,c,d) nvif_object_mthd(nvif_object(a), (b), (c), (d))
+
+/*XXX*/
+#include <core/object.h>
+#define nvkm_object(a) ((struct nouveau_object *)nvif_object(a)->priv)
+
+#endif
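A short usage sketch for the accessor macros above (hypothetical, offsets invented): nvif_rd32()/nvif_wr32() go through the mmio mapping once nvif_object_map() has succeeded, and fall back to the RD/WR ioctls otherwise.

	u32 tmp;

	if (nvif_object_map(object) == 0) {
		tmp = nvif_rd32(object, 0x0100);		/* direct mmio access */
		nvif_wr32(object, 0x0104, tmp | 0x00000001);
		nvif_mask(object, 0x0108, 0x000000ff, 0x02);	/* read-modify-write helper */
		nvif_object_unmap(object);
	} else {
		tmp = nvif_rd32(object, 0x0100);		/* routed through nvif_object_rd() */
	}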
diff --git a/drivers/gpu/drm/nouveau/nvif/os.h b/drivers/gpu/drm/nouveau/nvif/os.h
new file mode 120000
index 000000000000..bd744b2cf5cf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/os.h
@@ -0,0 +1 @@
+../core/os.h
\ No newline at end of file
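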
diff --git a/drivers/gpu/drm/nouveau/nvif/unpack.h b/drivers/gpu/drm/nouveau/nvif/unpack.h
new file mode 100644
index 000000000000..5933188b4a77
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/unpack.h
@@ -0,0 +1,24 @@
+#ifndef __NVIF_UNPACK_H__
+#define __NVIF_UNPACK_H__
+
+#define nvif_unvers(d) ({ \
+ ret = (size == sizeof(d)) ? 0 : -ENOSYS; \
+ (ret == 0); \
+})
+
+#define nvif_unpack(d,vl,vh,m) ({ \
+ if ((vl) == 0 || ret == -ENOSYS) { \
+ int _size = sizeof(d); \
+ if (_size <= size && (d).version >= (vl) && \
+ (d).version <= (vh)) { \
+ data = (u8 *)data + _size; \
+ size = size - _size; \
+ ret = ((m) || !size) ? 0 : -E2BIG; \
+ } else { \
+ ret = -ENOSYS; \
+ } \
+ } \
+ (ret == 0); \
+})
+
+#endif
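Both macros rely on "ret", "data" and "size" existing in the caller's scope, and on the unpacked struct starting with a "version" byte. A minimal (hypothetical) handler using them follows; the argument struct and do_something() are invented for illustration:

struct example_args_v0 {
	__u8  version;
	__u8  pad01[7];
	__u64 value;
};

static int
example_mthd(void *data, u32 size)
{
	union {
		struct example_args_v0 v0;
	} *args = data;
	int ret;

	if (nvif_unpack(args->v0, 0, 0, false)) {
		/* version 0 accepted, no trailing bytes allowed (m == false) */
		return do_something(args->v0.value);
	}
	return ret;	/* -ENOSYS (bad size/version) or -E2BIG (extra data) */
}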
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 86f4ead0441d..a94b11f7859d 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -32,8 +32,16 @@ struct omap_connector {
struct drm_connector base;
struct omap_dss_device *dssdev;
struct drm_encoder *encoder;
+ bool hdmi_mode;
};
+bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ return omap_connector->hdmi_mode;
+}
+
void copy_timings_omap_to_drm(struct drm_display_mode *mode,
struct omap_video_timings *timings)
{
@@ -130,7 +138,7 @@ static void omap_connector_destroy(struct drm_connector *connector)
struct omap_dss_device *dssdev = omap_connector->dssdev;
DBG("%s", omap_connector->dssdev->name);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(omap_connector);
@@ -162,10 +170,14 @@ static int omap_connector_get_modes(struct drm_connector *connector)
drm_mode_connector_update_edid_property(
connector, edid);
n = drm_add_edid_modes(connector, edid);
+
+ omap_connector->hdmi_mode =
+ drm_detect_hdmi_monitor(edid);
} else {
drm_mode_connector_update_edid_property(
connector, NULL);
}
+
kfree(edid);
} else {
struct drm_display_mode *mode = drm_mode_create(dev);
@@ -307,7 +319,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f926b4caf449..56c60552abba 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
{
- dma_addr_t pat_pa = 0;
+ dma_addr_t pat_pa = 0, data_pa = 0;
uint32_t *data;
struct pat *pat;
struct refill_engine *engine = txn->engine_handle;
@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
.lut_id = engine->tcm->lut_id,
};
- data = alloc_dma(txn, 4*i, &pat->data_pa);
+ data = alloc_dma(txn, 4*i, &data_pa);
+ /* FIXME: what if data_pa is more than 32-bit ? */
+ pat->data_pa = data_pa;
while (i--) {
int n = i + roll;
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 284b80fc3c54..84d73a61b34b 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -119,13 +119,6 @@ struct omap_drm_private {
struct omap_drm_irq error_handler;
};
-/* this should probably be in drm-core to standardize amongst drivers */
-#define DRM_ROTATE_0 0
-#define DRM_ROTATE_90 1
-#define DRM_ROTATE_180 2
-#define DRM_ROTATE_270 3
-#define DRM_REFLECT_X 4
-#define DRM_REFLECT_Y 5
#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
@@ -194,6 +187,7 @@ struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
void omap_connector_flush(struct drm_connector *connector,
int x, int y, int w, int h);
+bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
void copy_timings_omap_to_drm(struct drm_display_mode *mode,
struct omap_video_timings *timings);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 5290a88c681d..7445fb1491ae 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -17,6 +17,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <drm/drm_edid.h>
+
#include "omap_drv.h"
#include "drm_crtc.h"
@@ -89,6 +91,31 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct drm_device *dev = encoder->dev;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->dssdev;
+ struct drm_connector *connector;
+ bool hdmi_mode;
+ int r;
+
+ hdmi_mode = false;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ hdmi_mode = omap_connector_get_hdmi_mode(connector);
+ break;
+ }
+ }
+
+ if (dssdev->driver->set_hdmi_mode)
+ dssdev->driver->set_hdmi_mode(dssdev, hdmi_mode);
+
+ if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) {
+ struct hdmi_avi_infoframe avi;
+
+ r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode);
+ if (r == 0)
+ dssdev->driver->set_hdmi_infoframe(dssdev, &avi);
+ }
}
static void omap_encoder_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 1388ca7f87e8..8436c6857cda 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -281,7 +281,7 @@ fail:
return ret;
}
-static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.fb_probe = omap_fbdev_create,
};
@@ -325,7 +325,7 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
helper = &fbdev->base;
- helper->funcs = &omap_fb_helper_funcs;
+ drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper,
priv->num_crtcs, priv->num_connectors);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 95dbce286a41..e4849413ee80 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -233,11 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
WARN_ON(omap_obj->pages);
- /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
- * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
- * we actually want CMA memory for it all anyways..
- */
- pages = drm_gem_get_pages(obj, GFP_KERNEL);
+ pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
return PTR_ERR(pages);
@@ -791,7 +787,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
omap_obj->paddr = tiler_ssptr(block);
omap_obj->block = block;
- DBG("got paddr: %08x", omap_obj->paddr);
+ DBG("got paddr: %pad", &omap_obj->paddr);
}
omap_obj->paddr_cnt++;
@@ -985,9 +981,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off = drm_vma_node_start(&obj->vma_node);
- seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+ seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, obj->refcount.refcount.counter,
- off, omap_obj->paddr, omap_obj->paddr_cnt,
+ off, &omap_obj->paddr, omap_obj->paddr_cnt,
omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED) {
@@ -1183,9 +1179,7 @@ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
}
}
spin_unlock(&sync_lock);
-
- if (waiter)
- kfree(waiter);
+ kfree(waiter);
}
return ret;
}
@@ -1347,6 +1341,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
struct drm_gem_object *obj = NULL;
+ struct address_space *mapping;
size_t size;
int ret;
@@ -1404,14 +1399,16 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->height = gsize.tiled.height;
}
- ret = 0;
- if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
+ if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
drm_gem_private_object_init(dev, obj, size);
- else
+ } else {
ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
- if (ret)
- goto fail;
+ mapping = file_inode(obj->filp)->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
+ }
return obj;
@@ -1467,8 +1464,8 @@ void omap_gem_init(struct drm_device *dev)
entry->paddr = tiler_ssptr(block);
entry->block = block;
- DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
- entry->paddr,
+ DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
+ &entry->paddr,
usergart[i].stride_pfn << PAGE_SHIFT);
}
}
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 3cf31ee59aac..891a4dc608af 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
info->out_width, info->out_height,
info->screen_width);
- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
- info->paddr, info->p_uv_addr);
+ DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
+ &info->paddr, &info->p_uv_addr);
/* TODO: */
ilace = false;
@@ -308,16 +308,13 @@ void omap_plane_install_properties(struct drm_plane *plane,
if (priv->has_dmm) {
prop = priv->rotation_prop;
if (!prop) {
- const struct drm_prop_enum_list props[] = {
- { DRM_ROTATE_0, "rotate-0" },
- { DRM_ROTATE_90, "rotate-90" },
- { DRM_ROTATE_180, "rotate-180" },
- { DRM_ROTATE_270, "rotate-270" },
- { DRM_REFLECT_X, "reflect-x" },
- { DRM_REFLECT_Y, "reflect-y" },
- };
- prop = drm_property_create_bitmask(dev, 0, "rotation",
- props, ARRAY_SIZE(props));
+ prop = drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_90) |
+ BIT(DRM_ROTATE_180) |
+ BIT(DRM_ROTATE_270) |
+ BIT(DRM_REFLECT_X) |
+ BIT(DRM_REFLECT_Y));
if (prop == NULL)
return;
priv->rotation_prop = prop;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 4ec874da5668..bee9f72b3a93 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -5,7 +5,7 @@ config DRM_PANEL
Panel registration and lookup framework.
menu "Display Panels"
- depends on DRM_PANEL
+ depends on DRM && DRM_PANEL
config DRM_PANEL_SIMPLE
tristate "support for simple panels"
@@ -18,14 +18,11 @@ config DRM_PANEL_SIMPLE
config DRM_PANEL_LD9040
tristate "LD9040 RGB/SPI panel"
- depends on DRM && DRM_PANEL
- depends on OF
- select SPI
+ depends on OF && SPI
select VIDEOMODE_HELPERS
config DRM_PANEL_S6E8AA0
tristate "S6E8AA0 DSI video mode panel"
- depends on DRM && DRM_PANEL
depends on OF
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index db1601fdbe29..42ac67b21e9f 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -110,7 +110,10 @@ struct ld9040 {
int error;
};
-#define panel_to_ld9040(p) container_of(p, struct ld9040, panel)
+static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
+{
+ return container_of(panel, struct ld9040, panel);
+}
static int ld9040_clear_error(struct ld9040 *ctx)
{
@@ -216,6 +219,11 @@ static int ld9040_power_off(struct ld9040 *ctx)
static int ld9040_disable(struct drm_panel *panel)
{
+ return 0;
+}
+
+static int ld9040_unprepare(struct drm_panel *panel)
+{
struct ld9040 *ctx = panel_to_ld9040(panel);
msleep(120);
@@ -228,7 +236,7 @@ static int ld9040_disable(struct drm_panel *panel)
return ld9040_power_off(ctx);
}
-static int ld9040_enable(struct drm_panel *panel)
+static int ld9040_prepare(struct drm_panel *panel)
{
struct ld9040 *ctx = panel_to_ld9040(panel);
int ret;
@@ -242,11 +250,16 @@ static int ld9040_enable(struct drm_panel *panel)
ret = ld9040_clear_error(ctx);
if (ret < 0)
- ld9040_disable(panel);
+ ld9040_unprepare(panel);
return ret;
}
+static int ld9040_enable(struct drm_panel *panel)
+{
+ return 0;
+}
+
static int ld9040_get_modes(struct drm_panel *panel)
{
struct drm_connector *connector = panel->connector;
@@ -273,6 +286,8 @@ static int ld9040_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs ld9040_drm_funcs = {
.disable = ld9040_disable,
+ .unprepare = ld9040_unprepare,
+ .prepare = ld9040_prepare,
.enable = ld9040_enable,
.get_modes = ld9040_get_modes,
};
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index 06e57a26db7a..b5217fe37f02 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -120,7 +120,10 @@ struct s6e8aa0 {
int error;
};
-#define panel_to_s6e8aa0(p) container_of(p, struct s6e8aa0, panel)
+static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e8aa0, panel);
+}
static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
{
@@ -133,14 +136,14 @@ static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ ssize_t ret;
if (ctx->error < 0)
return;
- ret = mipi_dsi_dcs_write(dsi, dsi->channel, data, len);
+ ret = mipi_dsi_dcs_write(dsi, data, len);
if (ret < 0) {
- dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
+ dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len,
data);
ctx->error = ret;
}
@@ -154,7 +157,7 @@ static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
if (ctx->error < 0)
return ctx->error;
- ret = mipi_dsi_dcs_read(dsi, dsi->channel, cmd, data, len);
+ ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
if (ret < 0) {
dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
ctx->error = ret;
@@ -889,6 +892,11 @@ static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
static int s6e8aa0_disable(struct drm_panel *panel)
{
+ return 0;
+}
+
+static int s6e8aa0_unprepare(struct drm_panel *panel)
+{
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
@@ -900,7 +908,7 @@ static int s6e8aa0_disable(struct drm_panel *panel)
return s6e8aa0_power_off(ctx);
}
-static int s6e8aa0_enable(struct drm_panel *panel)
+static int s6e8aa0_prepare(struct drm_panel *panel)
{
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
int ret;
@@ -913,11 +921,16 @@ static int s6e8aa0_enable(struct drm_panel *panel)
ret = ctx->error;
if (ret < 0)
- s6e8aa0_disable(panel);
+ s6e8aa0_unprepare(panel);
return ret;
}
+static int s6e8aa0_enable(struct drm_panel *panel)
+{
+ return 0;
+}
+
static int s6e8aa0_get_modes(struct drm_panel *panel)
{
struct drm_connector *connector = panel->connector;
@@ -944,6 +957,8 @@ static int s6e8aa0_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
.disable = s6e8aa0_disable,
+ .unprepare = s6e8aa0_unprepare,
+ .prepare = s6e8aa0_prepare,
.enable = s6e8aa0_enable,
.get_modes = s6e8aa0_get_modes,
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a25136132c31..4ce1db0a68ff 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -37,14 +37,35 @@ struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
+ unsigned int bpc;
+
struct {
unsigned int width;
unsigned int height;
} size;
+
+ /**
+ * @prepare: the time (in milliseconds) that it takes for the panel to
+ * become ready and start receiving video data
+ * @enable: the time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data
+ * @disable: the time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible)
+ * @unprepare: the time (in milliseconds) that it takes for the panel
+ * to power itself down completely
+ */
+ struct {
+ unsigned int prepare;
+ unsigned int enable;
+ unsigned int disable;
+ unsigned int unprepare;
+ } delay;
};
struct panel_simple {
struct drm_panel base;
+ bool prepared;
bool enabled;
const struct panel_desc *desc;
@@ -87,6 +108,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
num++;
}
+ connector->display_info.bpc = panel->desc->bpc;
connector->display_info.width_mm = panel->desc->size.width;
connector->display_info.height_mm = panel->desc->size.height;
@@ -105,21 +127,40 @@ static int panel_simple_disable(struct drm_panel *panel)
backlight_update_status(p->backlight);
}
+ if (p->desc->delay.disable)
+ msleep(p->desc->delay.disable);
+
+ p->enabled = false;
+
+ return 0;
+}
+
+static int panel_simple_unprepare(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (!p->prepared)
+ return 0;
+
if (p->enable_gpio)
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->enabled = false;
+
+ if (p->desc->delay.unprepare)
+ msleep(p->desc->delay.unprepare);
+
+ p->prepared = false;
return 0;
}
-static int panel_simple_enable(struct drm_panel *panel)
+static int panel_simple_prepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
int err;
- if (p->enabled)
+ if (p->prepared)
return 0;
err = regulator_enable(p->supply);
@@ -131,6 +172,24 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->enable_gpio)
gpiod_set_value_cansleep(p->enable_gpio, 1);
+ if (p->desc->delay.prepare)
+ msleep(p->desc->delay.prepare);
+
+ p->prepared = true;
+
+ return 0;
+}
+
+static int panel_simple_enable(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (p->enabled)
+ return 0;
+
+ if (p->desc->delay.enable)
+ msleep(p->desc->delay.enable);
+
if (p->backlight) {
p->backlight->props.power = FB_BLANK_UNBLANK;
backlight_update_status(p->backlight);
@@ -164,6 +223,8 @@ static int panel_simple_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs panel_simple_funcs = {
.disable = panel_simple_disable,
+ .unprepare = panel_simple_unprepare,
+ .prepare = panel_simple_prepare,
.enable = panel_simple_enable,
.get_modes = panel_simple_get_modes,
};
@@ -179,22 +240,21 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -ENOMEM;
panel->enabled = false;
+ panel->prepared = false;
panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
return PTR_ERR(panel->supply);
- panel->enable_gpio = devm_gpiod_get(dev, "enable");
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable");
if (IS_ERR(panel->enable_gpio)) {
err = PTR_ERR(panel->enable_gpio);
- if (err != -ENOENT) {
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ dev_err(dev, "failed to request GPIO: %d\n", err);
+ return err;
+ }
- panel->enable_gpio = NULL;
- } else {
+ if (panel->enable_gpio) {
err = gpiod_direction_output(panel->enable_gpio, 0);
if (err < 0) {
dev_err(dev, "failed to setup GPIO: %d\n", err);
@@ -285,6 +345,7 @@ static const struct drm_display_mode auo_b101aw03_mode = {
static const struct panel_desc auo_b101aw03 = {
.modes = &auo_b101aw03_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 223,
.height = 125,
@@ -307,12 +368,40 @@ static const struct drm_display_mode auo_b133xtn01_mode = {
static const struct panel_desc auo_b133xtn01 = {
.modes = &auo_b133xtn01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 293,
.height = 165,
},
};
+static const struct drm_display_mode auo_b133htn01_mode = {
+ .clock = 150660,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 172,
+ .hsync_end = 1920 + 172 + 80,
+ .htotal = 1920 + 172 + 80 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 25,
+ .vsync_end = 1080 + 25 + 10,
+ .vtotal = 1080 + 25 + 10 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b133htn01 = {
+ .modes = &auo_b133htn01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .prepare = 105,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
.clock = 72070,
.hdisplay = 1366,
@@ -329,6 +418,7 @@ static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
static const struct panel_desc chunghwa_claa101wa01a = {
.modes = &chunghwa_claa101wa01a_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 220,
.height = 120,
@@ -351,6 +441,7 @@ static const struct drm_display_mode chunghwa_claa101wb01_mode = {
static const struct panel_desc chunghwa_claa101wb01 = {
.modes = &chunghwa_claa101wb01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 223,
.height = 125,
@@ -374,6 +465,7 @@ static const struct drm_display_mode edt_et057090dhu_mode = {
static const struct panel_desc edt_et057090dhu = {
.modes = &edt_et057090dhu_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 115,
.height = 86,
@@ -397,12 +489,82 @@ static const struct drm_display_mode edt_etm0700g0dh6_mode = {
static const struct panel_desc edt_etm0700g0dh6 = {
.modes = &edt_etm0700g0dh6_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 152,
.height = 91,
},
};
+static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
+ .clock = 32260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 168,
+ .hsync_end = 800 + 168 + 64,
+ .htotal = 800 + 168 + 64 + 88,
+ .vdisplay = 480,
+ .vsync_start = 480 + 37,
+ .vsync_end = 480 + 37 + 2,
+ .vtotal = 480 + 37 + 2 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc foxlink_fl500wvr00_a0t = {
+ .modes = &foxlink_fl500wvr00_a0t_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+};
+
+static const struct drm_display_mode innolux_n116bge_mode = {
+ .clock = 71000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 6,
+ .htotal = 1366 + 64 + 6 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 8,
+ .vsync_end = 768 + 8 + 4,
+ .vtotal = 768 + 8 + 4 + 8,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_n116bge = {
+ .modes = &innolux_n116bge_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+};
+
+static const struct drm_display_mode innolux_n156bge_l21_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 16,
+ .hsync_end = 1366 + 16 + 34,
+ .htotal = 1366 + 16 + 34 + 50,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 6,
+ .vtotal = 768 + 2 + 6 + 12,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_n156bge_l21 = {
+ .modes = &innolux_n156bge_l21_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 344,
+ .height = 193,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -419,6 +581,7 @@ static const struct drm_display_mode lg_lp129qe_mode = {
static const struct panel_desc lg_lp129qe = {
.modes = &lg_lp129qe_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 272,
.height = 181,
@@ -441,6 +604,7 @@ static const struct drm_display_mode samsung_ltn101nt05_mode = {
static const struct panel_desc samsung_ltn101nt05 = {
.modes = &samsung_ltn101nt05_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 1024,
.height = 600,
@@ -452,6 +616,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
+ .compatible = "auo,b133htn01",
+ .data = &auo_b133htn01,
+ }, {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
@@ -470,14 +637,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0700g0dh6",
.data = &edt_etm0700g0dh6,
}, {
+ .compatible = "foxlink,fl500wvr00-a0t",
+ .data = &foxlink_fl500wvr00_a0t,
+ }, {
+ .compatible = "innolux,n116bge",
+ .data = &innolux_n116bge,
+ }, {
+ .compatible = "innolux,n156bge-l21",
+ .data = &innolux_n156bge_l21,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
- .compatible = "simple-panel",
- }, {
/* sentinel */
}
};
@@ -545,7 +719,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
.height = 151,
},
},
- .flags = MIPI_DSI_MODE_VIDEO,
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
@@ -599,7 +773,8 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.height = 136,
},
},
- .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
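For context, the call order the new prepare/unprepare split assumes, using the drm_panel helper names (a sketch only; the exact wrappers depend on the drm_panel.h in this tree):

	drm_panel_prepare(panel);	/* power rails / enable GPIO; waits desc->delay.prepare */
	drm_panel_enable(panel);	/* waits desc->delay.enable, then switches the backlight on */
	/* ... display pipeline running ... */
	drm_panel_disable(panel);	/* backlight off; waits desc->delay.disable */
	drm_panel_unprepare(panel);	/* power down; waits desc->delay.unprepare */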
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5d7ea2461852..b8ced08b6291 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -835,7 +835,7 @@ static void qxl_conn_destroy(struct drm_connector *connector)
struct qxl_output *qxl_output =
drm_connector_to_qxl_output(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(qxl_output);
}
@@ -902,7 +902,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_object_attach_property(&connector->base,
qdev->hotplug_mode_update_property, 0);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index f437b30ce689..df567888bb1e 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -660,7 +660,7 @@ static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
return 0;
}
-static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
.fb_probe = qxl_fb_find_or_create_single,
};
@@ -676,9 +676,12 @@ int qxl_fbdev_init(struct qxl_device *qdev)
qfbdev->qdev = qdev;
qdev->mode_info.qfbdev = qfbdev;
- qfbdev->helper.funcs = &qxl_fb_helper_funcs;
spin_lock_init(&qfbdev->delayed_ops_lock);
INIT_LIST_HEAD(&qfbdev->delayed_ops);
+
+ drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
+ &qxl_fb_helper_funcs);
+
ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
qxl_num_crtc /* num_crtc - QXL supports just 1 */,
QXLFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index d458a140c024..83a423293afd 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -31,7 +31,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
@@ -67,7 +67,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index dbcbfe80aac0..0013ad0db9ef 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
- ci_dpm.o dce6_afmt.o radeon_vm.o
+ ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o
# add async DMA block
radeon-y += \
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7d68203a3737..a7f2ddf09a9d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -331,12 +331,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
- /* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ /* get the native mode for scaling */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
-
- /* get the native mode for TV */
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ } else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
if (tv_dac->tv_std == TV_STD_NTSC ||
@@ -346,6 +344,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
else
radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
}
+ } else if (radeon_encoder->rmx_type != RMX_OFF) {
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
}
if (ASIC_IS_DCE3(rdev) &&
@@ -716,7 +716,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if (radeon_connector->use_digital &&
(radeon_connector->audio == RADEON_AUDIO_ENABLE))
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
@@ -735,7 +735,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
@@ -755,7 +755,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
} else if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 584090ac3eb9..022561e28707 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -940,7 +940,18 @@ static void ci_get_leakage_voltages(struct radeon_device *rdev)
pi->vddc_leakage.count = 0;
pi->vddci_leakage.count = 0;
- if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
+ continue;
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+ pi->vddc_leakage.count++;
+ }
+ }
+ } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 8debc9d47362..b630edc2fd0c 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -213,24 +213,37 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
if (!rdev->smc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- ucode_start_address = BONAIRE_SMC_UCODE_START;
- ucode_size = BONAIRE_SMC_UCODE_SIZE;
- break;
- case CHIP_HAWAII:
- ucode_start_address = HAWAII_SMC_UCODE_START;
- ucode_size = HAWAII_SMC_UCODE_SIZE;
- break;
- default:
- DRM_ERROR("unknown asic in smc ucode loader\n");
- BUG();
+ if (rdev->new_fw) {
+ const struct smc_firmware_header_v1_0 *hdr =
+ (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+ radeon_ucode_print_smc_hdr(&hdr->header);
+
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ ucode_start_address = BONAIRE_SMC_UCODE_START;
+ ucode_size = BONAIRE_SMC_UCODE_SIZE;
+ break;
+ case CHIP_HAWAII:
+ ucode_start_address = HAWAII_SMC_UCODE_START;
+ ucode_size = HAWAII_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+
+ src = (const u8 *)rdev->smc_fw->data;
}
if (ucode_size & 3)
return -EINVAL;
- src = (const u8 *)rdev->smc_fw->data;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
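The firmware handling pattern repeated across these radeon changes: request the new lower-case, header-wrapped blob first and validate it, falling back to the legacy upper-case raw blob only if that fails (a trimmed sketch of the logic in the hunks below; error paths omitted):

	err = request_firmware(&rdev->smc_fw, "radeon/bonaire_smc.bin", rdev->dev);
	if (err) {
		/* legacy blob: raw ucode array, length-checked against *_SMC_UCODE_SIZE */
		err = request_firmware(&rdev->smc_fw, "radeon/BONAIRE_smc.bin", rdev->dev);
	} else {
		/* new blob: common header parsed by radeon_ucode_validate() */
		err = radeon_ucode_validate(rdev->smc_fw);
		if (!err)
			new_fw++;	/* rdev->new_fw is set only when every blob is new-style */
	}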
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index c0ea66192fe0..b625646bf3e2 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -42,6 +42,16 @@ MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
+
+MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
+MODULE_FIRMWARE("radeon/bonaire_me.bin");
+MODULE_FIRMWARE("radeon/bonaire_ce.bin");
+MODULE_FIRMWARE("radeon/bonaire_mec.bin");
+MODULE_FIRMWARE("radeon/bonaire_mc.bin");
+MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
+MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
+MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
@@ -51,18 +61,45 @@ MODULE_FIRMWARE("radeon/HAWAII_mc2.bin");
MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
+
+MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
+MODULE_FIRMWARE("radeon/hawaii_me.bin");
+MODULE_FIRMWARE("radeon/hawaii_ce.bin");
+MODULE_FIRMWARE("radeon/hawaii_mec.bin");
+MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
+MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
+MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
+MODULE_FIRMWARE("radeon/kaveri_me.bin");
+MODULE_FIRMWARE("radeon/kaveri_ce.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
+MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
+MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
+
MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kabini_pfp.bin");
+MODULE_FIRMWARE("radeon/kabini_me.bin");
+MODULE_FIRMWARE("radeon/kabini_ce.bin");
+MODULE_FIRMWARE("radeon/kabini_mec.bin");
+MODULE_FIRMWARE("radeon/kabini_rlc.bin");
+MODULE_FIRMWARE("radeon/kabini_sdma.bin");
+
MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
MODULE_FIRMWARE("radeon/MULLINS_me.bin");
MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
@@ -70,6 +107,13 @@ MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
+MODULE_FIRMWARE("radeon/mullins_pfp.bin");
+MODULE_FIRMWARE("radeon/mullins_me.bin");
+MODULE_FIRMWARE("radeon/mullins_ce.bin");
+MODULE_FIRMWARE("radeon/mullins_mec.bin");
+MODULE_FIRMWARE("radeon/mullins_rlc.bin");
+MODULE_FIRMWARE("radeon/mullins_sdma.bin");
+
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
@@ -1760,27 +1804,44 @@ static void cik_srbm_select(struct radeon_device *rdev,
*/
int ci_mc_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
+ const __be32 *fw_data = NULL;
+ const __le32 *new_fw_data = NULL;
u32 running, blackout = 0;
- u32 *io_mc_regs;
+ u32 *io_mc_regs = NULL;
+ const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
- ucode_size = rdev->mc_fw->size / 4;
+ if (rdev->new_fw) {
+ const struct mc_firmware_header_v1_0 *hdr =
+ (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- io_mc_regs = (u32 *)&bonaire_io_mc_regs;
- regs_size = BONAIRE_IO_MC_REGS_SIZE;
- break;
- case CHIP_HAWAII:
- io_mc_regs = (u32 *)&hawaii_io_mc_regs;
- regs_size = HAWAII_IO_MC_REGS_SIZE;
- break;
- default:
- return -EINVAL;
+ radeon_ucode_print_mc_hdr(&hdr->header);
+
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ ucode_size = rdev->mc_fw->size / 4;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ io_mc_regs = (u32 *)&bonaire_io_mc_regs;
+ regs_size = BONAIRE_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAWAII:
+ io_mc_regs = (u32 *)&hawaii_io_mc_regs;
+ regs_size = HAWAII_IO_MC_REGS_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ fw_data = (const __be32 *)rdev->mc_fw->data;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1797,13 +1858,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
- WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ if (rdev->new_fw) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ } else {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
}
/* load the MC ucode */
- fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < ucode_size; i++)
- WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ for (i = 0; i < ucode_size; i++) {
+ if (rdev->new_fw)
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ else
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ }
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
@@ -1841,17 +1910,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
static int cik_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
+ const char *new_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
mec_req_size, rlc_req_size, mc_req_size = 0,
sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
char fw_name[30];
+ int new_fw = 0;
int err;
+ int num_fw;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_BONAIRE:
chip_name = "BONAIRE";
+ new_chip_name = "bonaire";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
@@ -1861,9 +1934,11 @@ static int cik_init_microcode(struct radeon_device *rdev)
mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
+ num_fw = 8;
break;
case CHIP_HAWAII:
chip_name = "HAWAII";
+ new_chip_name = "hawaii";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
@@ -1873,142 +1948,285 @@ static int cik_init_microcode(struct radeon_device *rdev)
mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
+ num_fw = 8;
break;
case CHIP_KAVERI:
chip_name = "KAVERI";
+ new_chip_name = "kaveri";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = KV_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 7;
break;
case CHIP_KABINI:
chip_name = "KABINI";
+ new_chip_name = "kabini";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = KB_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 6;
break;
case CHIP_MULLINS:
chip_name = "MULLINS";
+ new_chip_name = "mullins";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = ML_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 6;
break;
default: BUG();
}
- DRM_INFO("Loading %s Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", new_chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
- err = -EINVAL;
- goto out;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->pfp_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->me_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->ce_fw->size != ce_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->ce_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->ce_fw->size != ce_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->ce_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name);
err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->mec_fw->size != mec_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->mec_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->mec_fw->size != mec_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mec_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->mec_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
+ }
+
+ if (rdev->family == CHIP_KAVERI) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name);
+ err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev);
+ if (err) {
+ goto out;
+ } else {
+ err = radeon_ucode_validate(rdev->mec2_fw);
+ if (err) {
+ goto out;
+ } else {
+ new_fw++;
+ }
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
- "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->rlc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name);
err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->sdma_fw->size != sdma_req_size) {
- printk(KERN_ERR
- "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
- rdev->sdma_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->sdma_fw->size != sdma_req_size) {
+ printk(KERN_ERR
+ "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
+ rdev->sdma_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->sdma_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
/* No SMC, MC ucode on APUs */
if (!(rdev->flags & RADEON_IS_IGP)) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ }
+ if ((rdev->mc_fw->size != mc_req_size) &&
+ (rdev->mc_fw->size != mc2_req_size)){
+ printk(KERN_ERR
+ "cik_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+ } else {
+ err = radeon_ucode_validate(rdev->mc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
goto out;
+ } else {
+ new_fw++;
+ }
}
- if ((rdev->mc_fw->size != mc_req_size) &&
- (rdev->mc_fw->size != mc2_req_size)){
- printk(KERN_ERR
- "cik_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "cik_smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "cik_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->smc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
}
+ if (new_fw == 0) {
+ rdev->new_fw = false;
+ } else if (new_fw < num_fw) {
+ printk(KERN_ERR "ci_fw: mixing new and old firmware!\n");
+ err = -EINVAL;
+ } else {
+ rdev->new_fw = true;
+ }
+
out:
if (err) {
if (err != -EINVAL)
@@ -2021,8 +2239,14 @@ out:
rdev->me_fw = NULL;
release_firmware(rdev->ce_fw);
rdev->ce_fw = NULL;
+ release_firmware(rdev->mec_fw);
+ rdev->mec_fw = NULL;
+ release_firmware(rdev->mec2_fw);
+ rdev->mec2_fw = NULL;
release_firmware(rdev->rlc_fw);
rdev->rlc_fw = NULL;
+ release_firmware(rdev->sdma_fw);
+ rdev->sdma_fw = NULL;
release_firmware(rdev->mc_fw);
rdev->mc_fw = NULL;
release_firmware(rdev->smc_fw);
@@ -3666,8 +3890,6 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
- /* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
/**
@@ -3696,8 +3918,6 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, upper_32_bits(addr));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
- /* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3969,7 +4189,6 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
*/
static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
@@ -3977,26 +4196,70 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
cik_cp_gfx_enable(rdev, false);
- /* PFP */
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
- WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __be32 *)rdev->ce_fw->data;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
- WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
- fw_data = (const __be32 *)rdev->me_fw->data;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
- WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *pfp_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ const struct gfx_firmware_header_v1_0 *ce_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ const struct gfx_firmware_header_v1_0 *me_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+ radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+ radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ }
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
@@ -4261,7 +4524,6 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
*/
static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->mec_fw)
@@ -4269,20 +4531,55 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
cik_cp_compute_enable(rdev, false);
- /* MEC1 */
- fw_data = (const __be32 *)rdev->mec_fw->data;
- WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
- for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
- WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *mec_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&mec_hdr->header);
+
+ /* MEC1 */
+ fw_data = (const __le32 *)
+ (rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
- if (rdev->family == CHIP_KAVERI) {
/* MEC2 */
+ if (rdev->family == CHIP_KAVERI) {
+ const struct gfx_firmware_header_v1_0 *mec2_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+
+ fw_data = (const __le32 *)
+ (rdev->mec2_fw->data +
+ le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ }
+ } else {
+ const __be32 *fw_data;
+
+ /* MEC1 */
fw_data = (const __be32 *)rdev->mec_fw->data;
- WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
- WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+
+ if (rdev->family == CHIP_KAVERI) {
+ /* MEC2 */
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
+ WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ }
}
return 0;
@@ -4375,7 +4672,7 @@ static int cik_mec_init(struct radeon_device *rdev)
r = radeon_bo_create(rdev,
rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->mec.hpd_eop_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -4545,7 +4842,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
r = radeon_bo_create(rdev,
sizeof(struct bonaire_mqd),
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->ring[idx].mqd_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
@@ -5402,7 +5699,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
@@ -5642,12 +5938,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
+ int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
if (vm == NULL)
return;
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
radeon_ring_write(ring,
@@ -5697,7 +5994,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 1 << vm->id);
/* compute doesn't have PFP */
- if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
+ if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
@@ -5865,28 +6162,10 @@ static void cik_rlc_start(struct radeon_device *rdev)
static int cik_rlc_resume(struct radeon_device *rdev)
{
u32 i, size, tmp;
- const __be32 *fw_data;
if (!rdev->rlc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- default:
- size = BONAIRE_RLC_UCODE_SIZE;
- break;
- case CHIP_KAVERI:
- size = KV_RLC_UCODE_SIZE;
- break;
- case CHIP_KABINI:
- size = KB_RLC_UCODE_SIZE;
- break;
- case CHIP_MULLINS:
- size = ML_RLC_UCODE_SIZE;
- break;
- }
-
cik_rlc_stop(rdev);
/* disable CG */
@@ -5910,11 +6189,45 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- fw_data = (const __be32 *)rdev->rlc_fw->data;
+ if (rdev->new_fw) {
+ const struct rlc_firmware_header_v1_0 *hdr =
+ (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+ const __le32 *fw_data = (const __le32 *)
+ (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ radeon_ucode_print_rlc_hdr(&hdr->header);
+
+ size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
WREG32(RLC_GPM_UCODE_ADDR, 0);
- for (i = 0; i < size; i++)
- WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(RLC_GPM_UCODE_ADDR, 0);
+ for (i = 0; i < size; i++)
+ WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ default:
+ size = BONAIRE_RLC_UCODE_SIZE;
+ break;
+ case CHIP_KAVERI:
+ size = KV_RLC_UCODE_SIZE;
+ break;
+ case CHIP_KABINI:
+ size = KB_RLC_UCODE_SIZE;
+ break;
+ case CHIP_MULLINS:
+ size = ML_RLC_UCODE_SIZE;
+ break;
+ }
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ for (i = 0; i < size; i++)
+ WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ }
/* XXX - find out what chips support lbpw */
cik_enable_lbpw(rdev, false);
@@ -6348,11 +6661,10 @@ static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
void cik_init_cp_pg_table(struct radeon_device *rdev)
{
- const __be32 *fw_data;
volatile u32 *dst_ptr;
int me, i, max_me = 4;
u32 bo_offset = 0;
- u32 table_offset;
+ u32 table_offset, table_size;
if (rdev->family == CHIP_KAVERI)
max_me = 5;
@@ -6363,24 +6675,71 @@ void cik_init_cp_pg_table(struct radeon_device *rdev)
/* write the cp table buffer */
dst_ptr = rdev->rlc.cp_table_ptr;
for (me = 0; me < max_me; me++) {
- if (me == 0) {
- fw_data = (const __be32 *)rdev->ce_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
- } else if (me == 1) {
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
- } else if (me == 2) {
- fw_data = (const __be32 *)rdev->me_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
+ if (rdev->new_fw) {
+ const __le32 *fw_data;
+ const struct gfx_firmware_header_v1_0 *hdr;
+
+ if (me == 0) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i ++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+ bo_offset += table_size;
} else {
- fw_data = (const __be32 *)rdev->mec_fw->data;
- table_offset = CP_MEC_TABLE_OFFSET;
- }
+ const __be32 *fw_data;
+ table_size = CP_ME_TABLE_SIZE;
+
+ if (me == 0) {
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 1) {
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 2) {
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else {
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ table_offset = CP_MEC_TABLE_OFFSET;
+ }
- for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
- dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
+ for (i = 0; i < table_size; i ++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
+ }
+ bo_offset += table_size;
}
- bo_offset += CP_ME_TABLE_SIZE;
}
}
@@ -7618,7 +7977,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
@@ -7900,6 +8260,7 @@ restart_ih:
static int cik_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
+ u32 nop;
int r;
/* enable pcie gen2/3 link */
@@ -8033,9 +8394,18 @@ static int cik_startup(struct radeon_device *rdev)
}
cik_irq_set(rdev);
+ if (rdev->family == CHIP_HAWAII) {
+ if (rdev->new_fw)
+ nop = PACKET3(PACKET3_NOP, 0x3FFF);
+ else
+ nop = RADEON_CP_PACKET2;
+ } else {
+ nop = PACKET3(PACKET3_NOP, 0x3FFF);
+ }
+
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
@@ -8043,7 +8413,7 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
ring->me = 1; /* first MEC */
@@ -8054,7 +8424,7 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
/* dGPU only have 1 MEC */
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 8e9d0f1d858e..bcf480510ac2 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_ucode.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"
@@ -118,6 +119,7 @@ void cik_sdma_set_wptr(struct radeon_device *rdev,
reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+ (void)RREG32(reg);
}
/**
@@ -419,7 +421,6 @@ static int cik_sdma_rlc_resume(struct radeon_device *rdev)
*/
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->sdma_fw)
@@ -428,19 +429,48 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
/* halt the MEs */
cik_sdma_enable(rdev, false);
- /* sdma0 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- /* sdma1 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ if (rdev->new_fw) {
+ const struct sdma_firmware_header_v1_0 *hdr =
+ (const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_sdma_hdr(&hdr->header);
+
+ /* sdma0 */
+ fw_data = (const __le32 *)
+ (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __le32 *)
+ (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ } else {
+ const __be32 *fw_data;
+
+ /* sdma0 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ }
WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
@@ -719,7 +749,43 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0x1FFFF8)
+ bytes = 0x1FFFF8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+}
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@@ -729,84 +795,103 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using sDMA (CIK).
+ * Update PTEs by writing them manually using sDMA (CIK).
*/
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if (flags == R600_PTE_GART) {
- uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
- } else if (flags & R600_PTE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & R600_PTE_VALID)
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
}
}
+}
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count;
+ if (ndw > 0x7FFFF)
+ ndw = 0x7FFFF;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+ pe += ndw * 8;
+ addr += ndw * incr;
+ count -= ndw;
+ }
+}
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 0a65dc7e93e7..ab29f953a767 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -136,13 +136,13 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
AUDIO_LIPSYNC(connector->audio_latency[1]);
else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
} else {
if (connector->latency_present[0])
tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
AUDIO_LIPSYNC(connector->audio_latency[0]);
else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
@@ -164,8 +164,10 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -173,7 +175,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
@@ -225,8 +227,10 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -234,7 +238,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 15e4f28015e1..4fedd14e670a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2424,7 +2424,6 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -2677,7 +2676,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
@@ -4023,7 +4022,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -4101,7 +4101,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
@@ -4175,8 +4176,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->rlc.cp_table_size) {
if (rdev->rlc.cp_table_obj == NULL) {
- r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+ r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
+ PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.cp_table_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
@@ -4961,7 +4964,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 1ec0e6e83f9f..278c7a139d74 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -117,7 +117,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
@@ -172,7 +172,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 5a33ca681867..327b85f7fd0d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1229,7 +1229,6 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 6378e0276691..8a3e6221cece 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += ndw * 4;
+ src += ndw * 4;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@@ -315,71 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: hw access flags
+ * @flags: hw access flags
*
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
*/
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & R600_PTE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & R600_PTE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & R600_PTE_VALID)
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+ 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
}
}
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 1544efcf1c3a..04b5940b8923 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -652,7 +652,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
- radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
@@ -683,7 +682,7 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
}
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr)
+ uint64_t addr, uint32_t flags)
{
u32 *gtt = rdev->gart.ptr;
gtt[i] = cpu_to_le32(lower_32_bits(addr));
@@ -838,11 +837,7 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
- RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+ r100_ring_hdp_flush(rdev, ring);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
@@ -1061,6 +1056,20 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
(void)RREG32(RADEON_CP_RB_WPTR);
}
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * @rdev: radeon device structure
+ * @ring: ring buffer struct for emitting packets
+ */
+void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -1401,7 +1410,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
*/
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, waitreloc;
@@ -1441,12 +1449,11 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = R100_CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
@@ -4067,39 +4074,6 @@ int r100_init(struct radeon_device *rdev)
return 0;
}
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
- bool always_indirect)
-{
- if (reg < rdev->rmmio_size && !always_indirect)
- return readl(((void __iomem *)rdev->rmmio) + reg);
- else {
- unsigned long flags;
- uint32_t ret;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-
- return ret;
- }
-}
-
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
- bool always_indirect)
-{
- if (reg < rdev->rmmio_size && !always_indirect)
- writel(v, ((void __iomem *)rdev->rmmio) + reg);
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
- }
-}
-
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3c21d77a483d..75b30338c226 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -69,17 +69,23 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
mb();
}
+#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr)
+ uint64_t addr, uint32_t flags)
{
void __iomem *ptr = rdev->gart.ptr;
addr = (lower_32_bits(addr) >> 8) |
- ((upper_32_bits(addr) & 0xff) << 24) |
- R300_PTE_WRITEABLE | R300_PTE_READABLE;
+ ((upper_32_bits(addr) & 0xff) << 24);
+ if (flags & RADEON_GART_PAGE_READ)
+ addr |= R300_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ addr |= R300_PTE_WRITEABLE;
+ if (!(flags & RADEON_GART_PAGE_SNOOP))
+ addr |= R300_PTE_UNSNOOPED;
/* on x86 we want this to be CPU endian; on powerpc
* without HW swappers, it'll get swapped on the way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
@@ -120,7 +126,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 3c69f58e46ef..c70a504d96af 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -968,7 +968,6 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -1339,7 +1338,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->vram_scratch.robj);
+ 0, NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
@@ -3227,7 +3226,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_GTT, 0,
NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
@@ -3924,11 +3923,13 @@ restart_ih:
break;
case 9: /* D1 pflip */
DRM_DEBUG("IH: D1 flip\n");
- radeon_crtc_handle_flip(rdev, 0);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, 0);
break;
case 11: /* D2 pflip */
DRM_DEBUG("IH: D2 flip\n");
- radeon_crtc_handle_flip(rdev, 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, 1);
break;
case 19: /* HPD/DAC hotplug */
switch (src_data) {
@@ -4089,16 +4090,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
}
/**
- * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
* rdev: radeon device structure
- * bo: buffer object struct which userspace is waiting for idle
*
- * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
- * through ring buffer, this leads to corruption in rendering, see
- * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
- * directly perform HDP flush by writing register through MMIO.
+ * Some R6XX/R7XX don't seem to take into account HDP flushes performed
+ * through the ring buffer. This leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
+ * directly perform the HDP flush by writing the register through MMIO.
*/
-void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 12511bb5fd6f..c47537a1ddba 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -825,7 +825,6 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
uint32_t *vline_status)
{
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, wait_reg_mem;
@@ -887,12 +886,11 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = R600_CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 60c47f829122..9e1732eb402c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -64,6 +64,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/interval_tree.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -103,6 +104,7 @@ extern int radeon_hard_reset;
extern int radeon_vm_size;
extern int radeon_vm_block_size;
extern int radeon_deep_color;
+extern int radeon_use_pflipirq;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -304,6 +306,9 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
u16 *vddc, u16 *vddci,
u16 virtual_voltage_id,
u16 vbios_voltage_id);
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+ u16 virtual_voltage_id,
+ u16 *voltage);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,
@@ -317,6 +322,9 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
struct atom_voltage_table *voltage_table);
bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
u8 voltage_type, u8 voltage_mode);
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id);
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
u32 mem_clock);
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
@@ -441,14 +449,12 @@ struct radeon_mman {
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- uint64_t soffset;
- uint64_t eoffset;
uint32_t flags;
- bool valid;
+ uint64_t addr;
unsigned ref_count;
/* protected by vm mutex */
- struct list_head vm_list;
+ struct interval_tree_node it;
struct list_head vm_status;
/* constant after initialization */
@@ -465,6 +471,7 @@ struct radeon_bo {
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
+ u32 flags;
unsigned pin_count;
void *kptr;
u32 tiling_flags;
@@ -543,9 +550,9 @@ struct radeon_gem {
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int alignment, int initial_domain,
- bool discardable, bool kernel,
+ u32 flags, bool kernel,
struct drm_gem_object **obj);
int radeon_mode_dumb_create(struct drm_file *file_priv,
@@ -590,6 +597,12 @@ struct radeon_mc;
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
+#define RADEON_GART_PAGE_DUMMY 0
+#define RADEON_GART_PAGE_VALID (1 << 0)
+#define RADEON_GART_PAGE_READ (1 << 1)
+#define RADEON_GART_PAGE_WRITE (1 << 2)
+#define RADEON_GART_PAGE_SNOOP (1 << 3)
+
struct radeon_gart {
dma_addr_t table_addr;
struct radeon_bo *robj;
@@ -614,8 +627,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist,
- dma_addr_t *dma_addr);
-void radeon_gart_restore(struct radeon_device *rdev);
+ dma_addr_t *dma_addr, uint32_t flags);
/*
@@ -855,9 +867,9 @@ struct radeon_mec {
#define R600_PTE_FRAG_64KB (4 << 7)
#define R600_PTE_FRAG_256KB (6 << 7)
-/* flags used for GART page table entries on R600+ */
-#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \
- | R600_PTE_READABLE | R600_PTE_WRITEABLE)
+/* flags needed to be set so we can copy directly from the GART table */
+#define R600_PTE_GART_MASK ( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
+ R600_PTE_SYSTEM | R600_PTE_VALID )
struct radeon_vm_pt {
struct radeon_bo *bo;
@@ -865,9 +877,12 @@ struct radeon_vm_pt {
};
struct radeon_vm {
- struct list_head va;
+ struct rb_root va;
unsigned id;
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head invalidated;
+
/* BOs freed, but not yet updated in the PT */
struct list_head freed;
@@ -1740,6 +1755,7 @@ struct radeon_asic_ring {
/* command emmit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1763,13 +1779,8 @@ struct radeon_asic {
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
int (*asic_reset)(struct radeon_device *rdev);
- /* ioctl hw specific callback. Some hw might want to perform special
- * operation on specific ioctl. For instance on wait idle some hw
- * might want to perform and HDP flush through MMIO as it seems that
- * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
- * through ring.
- */
- void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ /* Flush the HDP cache via MMIO */
+ void (*mmio_hdp_flush)(struct radeon_device *rdev);
/* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
@@ -1782,16 +1793,26 @@ struct radeon_asic {
struct {
void (*tlb_flush)(struct radeon_device *rdev);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
- void (*set_page)(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+ void (*copy_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+ void (*write_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ void (*set_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ void (*pad_ib)(struct radeon_ib *ib);
} vm;
/* ring specific callbacks */
struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2299,10 +2320,12 @@ struct radeon_device {
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
const struct firmware *mec_fw; /* CIK MEC firmware */
+ const struct firmware *mec2_fw; /* KV MEC2 firmware */
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
const struct firmware *uvd_fw; /* UVD firmware */
const struct firmware *vce_fw; /* VCE firmware */
+ bool new_fw;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2342,6 +2365,11 @@ struct radeon_device {
struct dev_pm_domain vga_pm_domain;
bool have_disp_power_ref;
+ u32 px_quirk_flags;
+
+ /* tracking pinned memory */
+ u64 vram_pin_size;
+ u64 gart_pin_size;
};
bool radeon_is_px(struct drm_device *dev);
@@ -2352,10 +2380,42 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
- bool always_indirect);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
- bool always_indirect);
+#define RADEON_MIN_MMIO_SIZE 0x10000
+
+static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
+{
+ /* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
+ if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+ return readl(((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
+ }
+}
+
+static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
+{
+ if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+ writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+ }
+}
+
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
@@ -2709,10 +2769,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
@@ -2840,6 +2903,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm);
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+ struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 34b9aa9e3c06..eeeeabe09758 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -185,6 +185,7 @@ static struct radeon_asic_ring r100_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
+ .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r100_asic = {
@@ -194,7 +195,7 @@ static struct radeon_asic r100_asic = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
@@ -260,7 +261,7 @@ static struct radeon_asic r200_asic = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
@@ -331,6 +332,7 @@ static struct radeon_asic_ring r300_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
+ .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r300_asic = {
@@ -340,7 +342,7 @@ static struct radeon_asic r300_asic = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
@@ -406,7 +408,7 @@ static struct radeon_asic r300_asic_pcie = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
@@ -472,7 +474,7 @@ static struct radeon_asic r420_asic = {
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
@@ -538,7 +540,7 @@ static struct radeon_asic rs400_asic = {
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
@@ -604,7 +606,7 @@ static struct radeon_asic rs600_asic = {
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
@@ -672,7 +674,7 @@ static struct radeon_asic rs690_asic = {
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
@@ -740,7 +742,7 @@ static struct radeon_asic rv515_asic = {
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
@@ -806,7 +808,7 @@ static struct radeon_asic r520_asic = {
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
@@ -898,7 +900,7 @@ static struct radeon_asic r600_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -970,7 +972,7 @@ static struct radeon_asic rv6xx_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1060,7 +1062,7 @@ static struct radeon_asic rs780_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1163,7 +1165,7 @@ static struct radeon_asic rv770_asic = {
.resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1281,7 +1283,7 @@ static struct radeon_asic evergreen_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1373,7 +1375,7 @@ static struct radeon_asic sumo_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1464,7 +1466,7 @@ static struct radeon_asic btc_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1599,7 +1601,7 @@ static struct radeon_asic cayman_asic = {
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1611,7 +1613,10 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .set_page = &cayman_dma_vm_set_page,
+ .copy_pages = &cayman_dma_vm_copy_pages,
+ .write_pages = &cayman_dma_vm_write_pages,
+ .set_pages = &cayman_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1699,7 +1704,7 @@ static struct radeon_asic trinity_asic = {
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1711,7 +1716,10 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .set_page = &cayman_dma_vm_set_page,
+ .copy_pages = &cayman_dma_vm_copy_pages,
+ .write_pages = &cayman_dma_vm_write_pages,
+ .set_pages = &cayman_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1829,7 +1837,7 @@ static struct radeon_asic si_asic = {
.resume = &si_resume,
.asic_reset = &si_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &si_get_xclk,
@@ -1841,7 +1849,10 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .set_page = &si_dma_vm_set_page,
+ .copy_pages = &si_dma_vm_copy_pages,
+ .write_pages = &si_dma_vm_write_pages,
+ .set_pages = &si_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1987,7 +1998,7 @@ static struct radeon_asic ci_asic = {
.resume = &cik_resume,
.asic_reset = &cik_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
@@ -1999,7 +2010,10 @@ static struct radeon_asic ci_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .set_page = &cik_sdma_vm_set_page,
+ .copy_pages = &cik_sdma_vm_copy_pages,
+ .write_pages = &cik_sdma_vm_write_pages,
+ .set_pages = &cik_sdma_vm_set_pages,
+ .pad_ib = &cik_sdma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2091,7 +2105,7 @@ static struct radeon_asic kv_asic = {
.resume = &cik_resume,
.asic_reset = &cik_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
@@ -2103,7 +2117,10 @@ static struct radeon_asic kv_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .set_page = &cik_sdma_vm_set_page,
+ .copy_pages = &cik_sdma_vm_copy_pages,
+ .write_pages = &cik_sdma_vm_write_pages,
+ .set_pages = &cik_sdma_vm_set_pages,
+ .pad_ib = &cik_sdma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2457,7 +2474,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2476,7 +2493,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
@@ -2502,7 +2519,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2530,7 +2547,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
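A minimal sketch, not part of the patch, showing how the renamed flush hooks fit together: the per-ring hdp_flush added to the gfx rings emits the HDP cache flush as ring packets, while mmio_hdp_flush (the former ioctl_wait_idle) performs it through a register write, for example right before a command stream is handed to the GPU. The wrapper name is an assumption.

/* Illustrative sketch only -- not part of the patch. */
static void example_hdp_flush(struct radeon_device *rdev,
			      struct radeon_ring *ring, int ring_idx)
{
	if (rdev->asic->ring[ring_idx]->hdp_flush)
		/* flush the HDP cache from the ring itself */
		rdev->asic->ring[ring_idx]->hdp_flush(rdev, ring);
	else if (rdev->asic->mmio_hdp_flush)
		/* otherwise fall back to the MMIO based flush */
		rdev->asic->mmio_hdp_flush(rdev);
}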
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 01e7c0ad8f01..275a5dc01780 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -68,7 +68,7 @@ int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -148,7 +148,8 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r100_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
-
+void r100_ring_hdp_flush(struct radeon_device *rdev,
+ struct radeon_ring *ring);
/*
* r200,rv250,rs300,rv280
*/
@@ -173,7 +174,7 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -209,7 +210,7 @@ extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -233,7 +234,7 @@ void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
@@ -351,7 +352,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
-extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern void r600_mmio_hdp_flush(struct radeon_device *rdev);
extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
@@ -606,11 +607,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -693,11 +705,22 @@ int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -771,11 +794,23 @@ int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 173f378428a9..92b2d8dd4735 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1963,7 +1963,7 @@ static const char *thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"asc7xxx",
};
@@ -1974,7 +1974,7 @@ static const char *pp_lib_thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"RV6xx",
"RV770",
@@ -3236,6 +3236,41 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
return 0;
}
+union get_voltage_info {
+ struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
+ struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
+};
+
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+ u16 virtual_voltage_id,
+ u16 *voltage)
+{
+ int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
+ u32 entry_id;
+ u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
+ union get_voltage_info args;
+
+ for (entry_id = 0; entry_id < count; entry_id++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
+ virtual_voltage_id)
+ break;
+ }
+
+ if (entry_id >= count)
+ return -EINVAL;
+
+ args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
+ args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+ args.in.ulSCLKFreq =
+ cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
+
+ return 0;
+}
+
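A hedged usage sketch, not part of the patch: a caller that only knows a virtual (leakage) voltage id can resolve it to a real VDDC level through the new helper; the wrapper name and the fallback value are placeholders.

/* Illustrative sketch only -- not part of the patch. */
static u16 example_resolve_evv(struct radeon_device *rdev,
			       u16 virtual_id, u16 fallback)
{
	u16 vddc;

	/* ask the VBIOS for the EVV voltage behind this virtual id */
	if (radeon_atom_get_voltage_evv(rdev, virtual_id, &vddc) == 0)
		return vddc;

	/* id not present in the vddc-vs-sclk dependency table */
	return fallback;
}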
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)
@@ -3397,6 +3432,50 @@ radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
return false;
}
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+ int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+ union voltage_object_info *voltage_info;
+ union voltage_object *voltage_object = NULL;
+
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ voltage_info = (union voltage_object_info *)
+ (rdev->mode_info.atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 3:
+ switch (crev) {
+ case 1:
+ voltage_object = (union voltage_object *)
+ atom_lookup_voltage_object_v3(&voltage_info->v3,
+ voltage_type,
+ VOLTAGE_OBJ_SVID2);
+ if (voltage_object) {
+ *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+ *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
+
int radeon_atom_get_max_voltage(struct radeon_device *rdev,
u8 voltage_type, u16 *max_voltage)
{
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 6e05a2e75a46..69f5695bdab9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -97,7 +97,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
int time;
n = RADEON_BENCHMARK_ITERATIONS;
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -109,7 +109,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 44831197e82e..300c4b3d4669 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -107,7 +107,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -115,7 +115,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -124,7 +124,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
- drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -148,7 +148,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
break;
}
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* hdmi deep color only implemented on DCE4+ */
if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
@@ -197,10 +197,19 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
connector->name, bpc);
}
}
+ else if (bpc > 8) {
+ /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+ DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+ connector->name);
+ bpc = 8;
+ }
}
- if ((radeon_deep_color == 0) && (bpc > 8))
+ if ((radeon_deep_color == 0) && (bpc > 8)) {
+ DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
+ connector->name);
bpc = 8;
+ }
DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
connector->name, connector->display_info.bpc, bpc);
@@ -216,7 +225,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct drm_encoder *best_encoder = NULL;
struct drm_encoder *encoder = NULL;
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
- struct drm_mode_object *obj;
bool connected;
int i;
@@ -226,14 +234,11 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev,
+ connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
-
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -249,7 +254,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
@@ -257,34 +261,134 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
if (encoder->encoder_type == encoder_type)
return encoder;
}
return NULL;
}
+struct edid *radeon_connector_edid(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
+
+ if (radeon_connector->edid) {
+ return radeon_connector->edid;
+ } else if (edid_blob) {
+ struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+ if (edid)
+ radeon_connector->edid = edid;
+ }
+ return radeon_connector->edid;
+}
+
+static void radeon_connector_get_edid(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->edid)
+ return;
+
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+ if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) &&
+ radeon_connector->ddc_bus->has_aux) {
+ radeon_connector->edid = drm_get_edid(connector,
+ &radeon_connector->ddc_bus->aux.ddc);
+ } else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
+ radeon_connector->ddc_bus->has_aux)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->aux.ddc);
+ else if (radeon_connector->ddc_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ } else if (radeon_connector->ddc_bus) {
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ }
+
+ if (!radeon_connector->edid) {
+ if (rdev->is_atom_bios) {
+ /* some laptops provide a hardcoded edid in rom for LCDs */
+ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ } else {
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
+ }
+}
+
+static void radeon_connector_free_edid(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+}
+
+static int radeon_ddc_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
+ ret = drm_add_edid_modes(connector, radeon_connector->edid);
+ drm_edid_to_eld(connector, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
+static void radeon_get_native_mode(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ struct radeon_encoder *radeon_encoder;
+
+ if (encoder == NULL)
+ return;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode =
+ list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ radeon_encoder->native_mode = *preferred_mode;
+ } else {
+ radeon_encoder->native_mode.clock = 0;
+ }
+}
+
/*
* radeon_connector_analog_encoder_conflict_solve
* - search for other connectors sharing this encoder
@@ -585,6 +689,35 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
radeon_property_change_mode(&radeon_encoder->base);
}
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum radeon_rmx_type rmx_type;
+
+ if (connector->encoder)
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ else {
+ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+ }
+
+ switch (val) {
+ default:
+ case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+ case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+ case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+ case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ }
+ if (radeon_encoder->rmx_type == rmx_type)
+ return 0;
+
+ if ((rmx_type != DRM_MODE_SCALE_NONE) &&
+ (radeon_encoder->native_mode.clock == 0))
+ return 0;
+
+ radeon_encoder->rmx_type = rmx_type;
+
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+
return 0;
}
@@ -625,22 +758,20 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
static int radeon_lvds_get_modes(struct drm_connector *connector)
{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int ret = 0;
struct drm_display_mode *mode;
- if (radeon_connector->ddc_bus) {
- ret = radeon_ddc_get_modes(radeon_connector);
- if (ret > 0) {
- encoder = radeon_best_single_encoder(connector);
- if (encoder) {
- radeon_fixup_lvds_native_mode(encoder, connector);
- /* add scaled modes */
- radeon_add_common_modes(encoder, connector);
- }
- return ret;
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+ if (ret > 0) {
+ encoder = radeon_best_single_encoder(connector);
+ if (encoder) {
+ radeon_fixup_lvds_native_mode(encoder, connector);
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
}
+ return ret;
}
encoder = radeon_best_single_encoder(connector);
@@ -715,16 +846,9 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
}
/* check for edid as well */
+ radeon_connector_get_edid(connector);
if (radeon_connector->edid)
ret = connector_status_connected;
- else {
- if (radeon_connector->ddc_bus) {
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- if (radeon_connector->edid)
- ret = connector_status_connected;
- }
- }
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
@@ -737,10 +861,9 @@ static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->edid)
- kfree(radeon_connector->edid);
+ radeon_connector_free_edid(connector);
kfree(radeon_connector->con_priv);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -797,10 +920,12 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
static int radeon_vga_get_modes(struct drm_connector *connector)
{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+
+ radeon_get_native_mode(connector);
return ret;
}
@@ -843,28 +968,26 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector_free_edid(connector);
+ radeon_connector_get_edid(connector);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
ret = connector_status_connected;
} else {
- radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital =
+ !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
- } else
+ } else {
ret = connector_status_connected;
+ }
}
} else {
@@ -999,15 +1122,6 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.set_property = radeon_connector_set_property,
};
-static int radeon_dvi_get_modes(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret;
-
- ret = radeon_ddc_get_modes(radeon_connector);
- return ret;
-}
-
static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -1048,7 +1162,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_mode_object *obj;
int i, r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1066,18 +1179,16 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector_free_edid(connector);
+ radeon_connector_get_edid(connector);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
/* rs690 seems to have a problem with connectors not existing and always
* return a block of 0's. If we see this just stop polling on this output */
- if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) &&
+ radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
connector->name);
@@ -1087,18 +1198,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
broken_edid = true; /* defer use_digital to later */
}
} else {
- radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital =
+ !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
- } else
+ } else {
ret = connector_status_connected;
-
+ }
/* This gets complicated. We have boards with VGA + HDMI with a
* shared DDC line and we have boards with DVI-D + HDMI with a shared
* DDC line. The latter is more complex because with DVI<->HDMI adapters
@@ -1118,8 +1229,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
/* hpd is our only option in this case */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
}
}
@@ -1153,14 +1263,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev,
+ connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
-
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1225,19 +1332,16 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
-
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1252,13 +1356,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
@@ -1291,7 +1390,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1310,7 +1409,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
- .get_modes = radeon_dvi_get_modes,
+ .get_modes = radeon_vga_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
@@ -1339,7 +1438,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
@@ -1350,7 +1450,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
}
if (ret > 0) {
@@ -1383,7 +1484,10 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+
+ radeon_get_native_mode(connector);
}
return ret;
@@ -1391,7 +1495,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
{
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
@@ -1400,11 +1503,10 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1419,9 +1521,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
return ENCODER_OBJECT_ID_NONE;
}
-bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
@@ -1431,11 +1532,10 @@ bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1478,10 +1578,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
goto out;
}
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
+ radeon_connector_free_edid(connector);
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
@@ -1587,7 +1684,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return radeon_dp_mode_valid_helper(connector, mode);
} else {
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1747,6 +1844,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1768,6 +1868,10 @@ radeon_add_atom_connector(struct drm_device *dev,
0);
drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
@@ -1817,6 +1921,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -1835,6 +1943,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
@@ -1868,17 +1980,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- }
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -1918,17 +2031,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1965,18 +2079,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
-
- }
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
@@ -2050,7 +2164,7 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
if (has_aux)
radeon_dp_aux_init(radeon_connector);
@@ -2211,5 +2325,5 @@ radeon_add_legacy_connector(struct drm_device *dev,
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
}
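The connector changes above converge on a single cached EDID per connector instead of ad-hoc drm_get_edid() calls at every site. A rough lifecycle sketch, not part of the patch and assuming it sits next to the static helpers in this file:

/* Illustrative sketch only -- not part of the patch. */
static void example_edid_lifecycle(struct drm_connector *connector)
{
	bool is_hdmi;

	/* detect(): drop any stale cache, then (re)fetch via DP aux,
	 * i2c or the hardcoded BIOS EDID fallback */
	radeon_connector_free_edid(connector);
	radeon_connector_get_edid(connector);

	/* get_modes(): reuse the cache, update the EDID property
	 * and add the probed modes */
	radeon_ddc_get_modes(connector);

	/* sink capability checks read through the same cache */
	is_hdmi = drm_detect_hdmi_monitor(radeon_connector_edid(connector));
	(void)is_hdmi;

	/* destroy() or re-detect: free the cached EDID */
	radeon_connector_free_edid(connector);
}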
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ae763f60c8a0..ee712c199b25 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -500,7 +500,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
if (r)
return r;
}
- return 0;
+
+ return radeon_vm_clear_invalids(rdev, vm);
}
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 697add2cd4e3..c8ea050c8fa4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,31 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
+#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+
+struct radeon_px_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 px_quirk_flags;
+};
+
+static struct radeon_px_quirk radeon_px_quirk_list[] = {
+ /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74551
+ */
+ { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
+ * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+ /* macbook pro 8.2 */
+ { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
+ { 0, 0, 0, 0, 0 },
+};
+
bool radeon_is_px(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -112,6 +137,26 @@ bool radeon_is_px(struct drm_device *dev)
return false;
}
+static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
+{
+ struct radeon_px_quirk *p = radeon_px_quirk_list;
+
+ /* Apply PX quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ rdev->px_quirk_flags = p->px_quirk_flags;
+ break;
+ }
+ ++p;
+ }
+
+ if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
+ rdev->flags &= ~RADEON_IS_PX;
+}
+
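Adding a runtime-PM quirk is a single table entry matched on PCI vendor/device and subsystem vendor/device, with an all-zero sentinel terminating the list. The entry below is a placeholder sketch, not a real device, and not part of the patch.

/* Illustrative sketch only -- placeholder IDs, not a real device. */
static struct radeon_px_quirk example_px_quirk_list[] = {
	/* chip vendor, chip device, subsys vendor, subsys device, flags */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1234, 0x5678, RADEON_PX_QUIRK_DISABLE_PX },
	{ 0, 0, 0, 0, 0 },
};

As the hunks above show, RADEON_PX_QUIRK_DISABLE_PX clears RADEON_IS_PX for the matching board, while RADEON_PX_QUIRK_LONG_WAKEUP lengthens the d3 delay used in the switcheroo handler.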
/**
* radeon_program_register_sequence - program an array of registers.
*
@@ -385,7 +430,8 @@ int radeon_wb_init(struct radeon_device *rdev)
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
+ &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
@@ -1077,7 +1123,19 @@ static void radeon_check_arguments(struct radeon_device *rdev)
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
- if (radeon_vm_block_size < 9) {
+ if (radeon_vm_block_size == -1) {
+
+ /* Total bits covered by PD + PTs */
+ unsigned bits = ilog2(radeon_vm_size) + 18;
+
+ /* Make sure the PD is 4K in size up to 8GB address space.
+ Above that split equally between PD and PTs */
+ if (radeon_vm_size <= 8)
+ radeon_vm_block_size = bits - 9;
+ else
+ radeon_vm_block_size = (bits + 3) / 2;
+
+ } else if (radeon_vm_block_size < 9) {
dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
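Worked example of the new default (not part of the patch): with the vm_size default raised to 8 GB, bits = ilog2(8) + 18 = 21; since vm_size <= 8, the block size becomes 21 - 9 = 12, so each page table spans 2^12 pages (16 MB) and the page directory keeps 2^9 = 512 entries, i.e. the single 4 KB page the comment calls for. For a larger space such as vm_size = 64, bits = 24 and the split branch yields (24 + 3) / 2 = 13 bits per page table.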
@@ -1092,25 +1150,6 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
/**
- * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
- * needed for waking up.
- *
- * @pdev: pci dev pointer
- */
-static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
-{
-
- /* 6600m in a macbook pro */
- if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
- pdev->subsystem_device == 0x00e2) {
- printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
- return true;
- }
-
- return false;
-}
-
-/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
@@ -1122,6 +1161,7 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct radeon_device *rdev = dev->dev_private;
if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
@@ -1133,7 +1173,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+ if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
dev->pdev->d3_delay = 20;
radeon_resume_kms(dev, true, true);
@@ -1337,6 +1377,9 @@ int radeon_device_init(struct radeon_device *rdev,
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
+ if (rdev->flags & RADEON_IS_PX)
+ radeon_device_handle_px_quirks(rdev);
+
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bf25061c8ac4..3fdf87318069 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -293,6 +293,18 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
if (radeon_crtc == NULL)
return;
+ /* Skip the pageflip completion check below (based on polling) on
+ * asics which reliably support hw pageflip completion irqs. pflip
+ * irqs are a reliable and race-free method of handling pageflip
+ * completion detection. A use_pflipirq module parameter < 2 allows
+ * to override this in case of asics with faulty pflip irqs.
+ * A module parameter of 0 would only use this polling based path,
+ * a parameter of 1 would use pflip irq only as a backup to this
+ * path, as in Linux 3.16.
+ */
+ if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
+ return;
+
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
@@ -823,64 +835,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
return ret;
}
-int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
-{
- struct drm_device *dev = radeon_connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- int ret = 0;
-
- /* don't leak the edid if we already fetched it in detect() */
- if (radeon_connector->edid)
- goto got_edid;
-
- /* on hw with routers, select right port */
- if (radeon_connector->router.ddc_valid)
- radeon_router_select_ddc_port(radeon_connector);
-
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
- ENCODER_OBJECT_ID_NONE) {
- if (radeon_connector->ddc_bus->has_aux)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->aux.ddc);
- } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
- struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
-
- if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
- dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
- radeon_connector->ddc_bus->has_aux)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->aux.ddc);
- else if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- } else {
- if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- }
-
- if (!radeon_connector->edid) {
- if (rdev->is_atom_bios) {
- /* some laptops provide a hardcoded edid in rom for LCDs */
- if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- } else
- /* some servers provide a hardcoded edid in rom for KVMs */
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- }
- if (radeon_connector->edid) {
-got_edid:
- drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
- ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
- drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
- return ret;
- }
- drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
- return 0;
-}
-
/* avivo */
/**
@@ -1749,7 +1703,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
(!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
is_hdtv_mode(mode)))) {
if (radeon_encoder->underscan_hborder != 0)
radeon_crtc->h_border = radeon_encoder->underscan_hborder;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 959f0866d993..092d067f93e1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -82,9 +82,11 @@
* 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
* CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
* 2.39.0 - Add INFO query for number of active CUs
+ * 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
+ * CS to GPU
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 39
+#define KMS_DRIVER_MINOR 40
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -174,9 +176,10 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
-int radeon_vm_size = 4;
-int radeon_vm_block_size = 9;
+int radeon_vm_size = 8;
+int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
+int radeon_use_pflipirq = 2;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -247,12 +250,15 @@ module_param_named(hard_reset, radeon_hard_reset, int, 0444);
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
-MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
+MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, radeon_deep_color, int, 0444);
+MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
+module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index bd4959ca23aa..3c2094c25b53 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -343,7 +343,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else
@@ -365,7 +365,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 665ced3b7313..94b0f2aa3d7c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -127,8 +127,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
- false, true,
- &gobj);
+ 0, true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
@@ -331,7 +330,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
return 0;
}
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
.fb_probe = radeonfb_create,
@@ -353,7 +352,9 @@ int radeon_fbdev_init(struct radeon_device *rdev)
rfbdev->rdev = rdev;
rdev->mode_info.rfbdev = rfbdev;
- rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+ drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
+ &radeon_fb_helper_funcs);
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
rdev->num_crtc,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 2e723651069b..a053a0779aac 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->gart.robj);
+ 0, NULL, &rdev->gart.robj);
if (r) {
return r;
}
@@ -243,7 +243,8 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base);
+ radeon_gart_set_page(rdev, t, page_base,
+ RADEON_GART_PAGE_DUMMY);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
@@ -261,13 +262,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: RADEON_GART_PAGE_* flags
*
* Binds the requested pages to the gart page table
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist, dma_addr_t *dma_addr)
+ int pages, struct page **pagelist, dma_addr_t *dma_addr,
+ uint32_t flags)
{
unsigned t;
unsigned p;
@@ -287,7 +290,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
+ radeon_gart_set_page(rdev, t, page_base, flags);
page_base += RADEON_GPU_PAGE_SIZE;
}
}
@@ -298,33 +301,6 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
}
/**
- * radeon_gart_restore - bind all pages in the gart page table
- *
- * @rdev: radeon_device pointer
- *
- * Binds all pages in the gart page table (all asics).
- * Used to rebuild the gart table on device startup or resume.
- */
-void radeon_gart_restore(struct radeon_device *rdev)
-{
- int i, j, t;
- u64 page_base;
-
- if (!rdev->gart.ptr) {
- return;
- }
- for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
- page_base = rdev->gart.pages_addr[i];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
- page_base += RADEON_GPU_PAGE_SIZE;
- }
- }
- mb();
- radeon_gart_tlb_flush(rdev);
-}
-
-/**
* radeon_gart_init - init the driver info for managing the gart
*
* @rdev: radeon_device pointer
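radeon_gart_bind() now takes an explicit RADEON_GART_PAGE_* flags mask instead of always writing snooped entries. A minimal caller-side sketch under that assumption; the wrapper name and parameters are illustrative only:

/* Sketch: bind pages into the GART writably, snooping only when the
 * backing pages are CPU-cached. Mirrors the radeon_ttm backend change
 * later in this series; all parameters are supplied by the caller. */
static int example_gart_bind(struct radeon_device *rdev, unsigned offset,
			     int pages, struct page **pagelist,
			     dma_addr_t *dma_addr, bool cpu_cached)
{
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
			 RADEON_GART_PAGE_WRITE;

	if (cpu_cached)
		flags |= RADEON_GART_PAGE_SNOOP;

	return radeon_gart_bind(rdev, offset, pages, pagelist, dma_addr, flags);
}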
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d09650c1d720..bfd7e1b0ff3f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -40,9 +40,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
}
}
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int alignment, int initial_domain,
- bool discardable, bool kernel,
+ u32 flags, bool kernel,
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
@@ -55,23 +55,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
alignment = PAGE_SIZE;
}
- /* maximun bo size is the minimun btw visible vram and gtt size */
- max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ /* Maximum bo size is the unpinned gtt size since we use the gtt to
+ * handle vram to system pool migrations.
+ */
+ max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
if (size > max_size) {
- printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
- __func__, __LINE__, size >> 20, max_size >> 20);
+ DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
+ size >> 20, max_size >> 20);
return -ENOMEM;
}
retry:
- r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
+ flags, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
initial_domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
- DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
@@ -208,18 +211,15 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
- unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size;
args->vram_visible = (u64)man->size << PAGE_SHIFT;
- if (rdev->stollen_vga_memory)
- args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
- args->vram_visible -= radeon_fbdev_total_size(rdev);
- args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
- for(i = 0; i < RADEON_NUM_RINGS; ++i)
- args->gart_size -= rdev->ring[i].ring_size;
+ args->vram_visible -= rdev->vram_pin_size;
+ args->gart_size = rdev->mc.gtt_size;
+ args->gart_size -= rdev->gart_pin_size;
+
return 0;
}
@@ -252,8 +252,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
- args->initial_domain, false,
- false, &gobj);
+ args->initial_domain, args->flags,
+ false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
@@ -358,16 +358,18 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
+ uint32_t cur_placement = 0;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, NULL, false);
- /* callback hw specific functions if any */
- if (rdev->asic->ioctl_wait_idle)
- robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+ r = radeon_bo_wait(robj, &cur_placement, false);
+ /* Flush HDP cache via MMIO if necessary */
+ if (rdev->asic->mmio_hdp_flush &&
+ radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+ robj->rdev->asic->mmio_hdp_flush(rdev);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
@@ -461,11 +463,6 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
}
- if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
- dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
- args->operation = RADEON_VA_RESULT_ERROR;
- return -EINVAL;
- }
switch (args->operation) {
case RADEON_VA_MAP:
@@ -499,9 +496,9 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case RADEON_VA_MAP:
- if (bo_va->soffset) {
+ if (bo_va->it.start) {
args->operation = RADEON_VA_RESULT_VA_EXIST;
- args->offset = bo_va->soffset;
+ args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
goto out;
}
r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -572,9 +569,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
args->size = ALIGN(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_device,
- &gobj);
+ RADEON_GEM_DOMAIN_VRAM, 0,
+ false, &gobj);
if (r)
return -ENOMEM;
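With the extra flags argument, callers of radeon_gem_object_create() can now request write-combined or uncached GTT pages. A small sketch of the new call signature; the helper name is illustrative and error handling is omitted:

/* Sketch: create a GTT GEM object with write-combined pages using the new
 * flags argument from the updated radeon_drm UAPI. */
static int example_create_wc_bo(struct radeon_device *rdev, unsigned long size,
				struct drm_gem_object **gobj)
{
	return radeon_gem_object_create(rdev, size, PAGE_SIZE,
					RADEON_GEM_DOMAIN_GTT,
					RADEON_GEM_GTT_WC,
					false /* not a kernel bo */, gobj);
}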
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
new file mode 100644
index 000000000000..65b0c213488d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ * Christian König
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU accessible memory where
+ * commands are stored. You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them. Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @size: requested IB size
+ *
+ * Request an IB (all asics). IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib *ib, struct radeon_vm *vm,
+ unsigned size)
+{
+ int r;
+
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
+ if (r) {
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+ return r;
+ }
+
+ r = radeon_semaphore_create(rdev, &ib->semaphore);
+ if (r) {
+ return r;
+ }
+
+ ib->ring = ring;
+ ib->fence = NULL;
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->vm = vm;
+ if (vm) {
+ /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+ * space and soffset is the offset inside the pool bo
+ */
+ ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+ } else {
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ }
+ ib->is_const_ib = false;
+
+ return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine). Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed. To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE. If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ int r = 0;
+
+ if (!ib->length_dw || !ring->ready) {
+ /* TODO: nothing in the IB that we should report. */
+ dev_err(rdev->dev, "couldn't schedule ib\n");
+ return -EINVAL;
+ }
+
+ /* 64 dwords should be enough for fence too */
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
+ if (r) {
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+ return r;
+ }
+
+ /* grab a vm id if necessary */
+ if (ib->vm) {
+ struct radeon_fence *vm_id_fence;
+ vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
+ radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
+ }
+
+ /* sync with other rings */
+ r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ if (ib->vm)
+ radeon_vm_flush(rdev, ib->vm, ib->ring);
+
+ if (const_ib) {
+ radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+ radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ }
+ radeon_ring_ib_execute(rdev, ib->ring, ib);
+ r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ if (const_ib) {
+ const_ib->fence = radeon_fence_ref(ib->fence);
+ }
+
+ if (ib->vm)
+ radeon_vm_fence(rdev, ib->vm, ib->fence);
+
+ radeon_ring_unlock_commit(rdev, ring);
+ return 0;
+}
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+
+ if (rdev->family >= CHIP_BONAIRE) {
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_GTT_WC);
+ } else {
+ /* Before CIK, it's better to stick to cacheable GTT due
+ * to the command stream checking
+ */
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT, 0);
+ }
+ if (r) {
+ return r;
+ }
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+ if (r) {
+ return r;
+ }
+
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+ }
+ return 0;
+}
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
+ }
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (!ring->ready)
+ continue;
+
+ r = radeon_ib_test(rdev, i, ring);
+ if (r) {
+ ring->ready = false;
+ rdev->needs_reset = false;
+
+ if (i == RADEON_RING_TYPE_GFX_INDEX) {
+ /* oh, oh, that's really bad */
+ DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+
+ } else {
+ /* still not good, but we can live with it */
+ DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+ return 0;
+
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+ return 0;
+#endif
+}
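Putting the functions above together, a typical in-kernel submission gets an IB, fills it, schedules it and frees it. A rough sketch of that lifecycle; the emitted packet is a placeholder and the helper name is not part of the driver:

/* Sketch: typical in-kernel IB lifecycle built from the functions above.
 * Error handling is abbreviated; real users emit asic-specific packets. */
static int example_submit_nop(struct radeon_device *rdev, int ring)
{
	struct radeon_ib ib;
	int r;

	r = radeon_ib_get(rdev, ring, &ib, NULL /* no VM */, 64);
	if (r)
		return r;

	ib.ptr[0] = 0x80000000;	/* type-2 NOP packet (placeholder) */
	ib.length_dw = 1;

	r = radeon_ib_schedule(rdev, &ib, NULL /* no const IB */);
	if (r == 0 && ib.fence)
		r = radeon_fence_wait(ib.fence, false);

	radeon_ib_free(rdev, &ib);
	return r;
}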
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d25ae6acfd5a..eb7164d07985 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -254,7 +254,18 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
break;
case RADEON_INFO_ACCEL_WORKING2:
- *value = rdev->accel_working;
+ if (rdev->family == CHIP_HAWAII) {
+ if (rdev->accel_working) {
+ if (rdev->new_fw)
+ *value = 3;
+ else
+ *value = 2;
+ } else {
+ *value = 0;
+ }
+ } else {
+ *value = rdev->accel_working;
+ }
break;
case RADEON_INFO_TILING_CONFIG:
if (rdev->family >= CHIP_BONAIRE)
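For Hawaii the ACCEL_WORKING2 query now distinguishes old and new firmware (2 vs. 3) instead of returning a plain boolean. A userspace-side sketch of reading the value, assuming libdrm's drmCommandWriteRead() and the drm_radeon_info layout from the UAPI header:

/* Userspace sketch: query RADEON_INFO_ACCEL_WORKING2.
 * On Hawaii: 0 = acceleration broken, 2 = old firmware, 3 = new firmware. */
#include <stdint.h>
#include <xf86drm.h>
#include <radeon_drm.h>

static int accel_working2(int fd, uint32_t *value)
{
	struct drm_radeon_info info = { 0 };

	info.request = RADEON_INFO_ACCEL_WORKING2;
	info.value = (uintptr_t)value;	/* kernel writes through this pointer */

	return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}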
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 0592ddb0904b..e27608c29c11 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -685,10 +685,11 @@ extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
-extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
extern int radeon_get_monitor_bpc(struct drm_connector *connector);
+extern struct edid *radeon_connector_edid(struct drm_connector *connector);
+
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
@@ -738,7 +739,6 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
-extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6c717b257d6d..480c87d8edc5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,16 +46,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
-static void radeon_bo_clear_va(struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va, *tmp;
-
- list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
- /* remove from all vm address space */
- radeon_vm_bo_rmv(bo->rdev, bo_va);
- }
-}
-
static void radeon_update_memory_usage(struct radeon_bo *bo,
unsigned mem_type, int sign)
{
@@ -90,7 +80,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
- radeon_bo_clear_va(bo);
+ WARN_ON(!list_empty(&bo->va));
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -114,15 +104,23 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (domain & RADEON_GEM_DOMAIN_GTT) {
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+ } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+ (rbo->rdev->flags & RADEON_IS_AGP)) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_TT;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
}
}
if (domain & RADEON_GEM_DOMAIN_CPU) {
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+ if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+ } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+ rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_SYSTEM;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
}
@@ -146,7 +144,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
- struct sg_table *sg, struct radeon_bo **bo_ptr)
+ u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
@@ -183,6 +181,12 @@ int radeon_bo_create(struct radeon_device *rdev,
bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
RADEON_GEM_DOMAIN_GTT |
RADEON_GEM_DOMAIN_CPU);
+
+ bo->flags = flags;
+ /* PCI GART is always snooped */
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
@@ -232,6 +236,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
ttm_bo_kunmap(&bo->kmap);
}
+struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
+{
+ if (bo == NULL)
+ return NULL;
+
+ ttm_bo_reference(&bo->tbo);
+ return bo;
+}
+
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
@@ -241,9 +254,7 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo);
- up_read(&rdev->pm.mclk_lock);
if (tbo == NULL)
*bo = NULL;
}
@@ -292,9 +303,13 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = radeon_bo_gpu_offset(bo);
- }
- if (unlikely(r != 0))
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ bo->rdev->vram_pin_size += radeon_bo_size(bo);
+ else
+ bo->rdev->gart_pin_size += radeon_bo_size(bo);
+ } else {
dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+ }
return r;
}
@@ -317,8 +332,14 @@ int radeon_bo_unpin(struct radeon_bo *bo)
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r != 0))
+ if (likely(r == 0)) {
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ bo->rdev->vram_pin_size -= radeon_bo_size(bo);
+ else
+ bo->rdev->gart_pin_size -= radeon_bo_size(bo);
+ } else {
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+ }
return r;
}
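radeon_bo_create() now carries the GTT caching flags, and pin/unpin maintain the new vram_pin_size/gart_pin_size counters. A short sketch of allocating and pinning a VRAM buffer with the extended signature; the helper name is illustrative:

/* Sketch: allocate and pin a small VRAM buffer with the extended
 * radeon_bo_create() signature. Pinning also updates
 * rdev->vram_pin_size / rdev->gart_pin_size as shown above. */
static int example_pin_vram_bo(struct radeon_device *rdev,
			       struct radeon_bo **bo, u64 *gpu_addr)
{
	int r;

	r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0 /* no GTT flags */,
			     NULL, bo);
	if (r)
		return r;

	r = radeon_bo_reserve(*bo, false);
	if (r == 0) {
		r = radeon_bo_pin(*bo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
		radeon_bo_unreserve(*bo);
	}
	if (r)
		radeon_bo_unref(bo);
	return r;
}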
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 5a873f31a171..98a47fdf3625 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -124,11 +124,12 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
- bool kernel, u32 domain,
+ bool kernel, u32 domain, u32 flags,
struct sg_table *sg,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
@@ -170,7 +171,8 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain);
+ unsigned size, u32 align, u32 domain,
+ u32 flags);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index e447e390d09a..23314be49480 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1303,10 +1303,6 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
case CHIP_RV770:
- case CHIP_BARTS:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- case CHIP_CAYMAN:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1330,6 +1326,10 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ case CHIP_CAYMAN:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@@ -1400,9 +1400,7 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
}
radeon_hwmon_fini(rdev);
-
- if (rdev->pm.power_state)
- kfree(rdev->pm.power_state);
+ kfree(rdev->pm.power_state);
}
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1421,9 +1419,7 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
radeon_dpm_fini(rdev);
radeon_hwmon_fini(rdev);
-
- if (rdev->pm.power_state)
- kfree(rdev->pm.power_state);
+ kfree(rdev->pm.power_state);
}
void radeon_pm_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 28d71070c389..0b16f2cbcf17 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -65,7 +65,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
int ret;
ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, sg, &bo);
+ RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f8050f5429e2..5b4e0cf231a0 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -26,258 +26,8 @@
* Jerome Glisse
* Christian König
*/
-#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon_reg.h"
#include "radeon.h"
-#include "atom.h"
-
-/*
- * IB
- * IBs (Indirect Buffers) and areas of GPU accessible memory where
- * commands are stored. You can put a pointer to the IB in the
- * command ring and the hw will fetch the commands from the IB
- * and execute them. Generally userspace acceleration drivers
- * produce command buffers which are send to the kernel and
- * put in IBs for execution by the requested ring.
- */
-static int radeon_debugfs_sa_init(struct radeon_device *rdev);
-
-/**
- * radeon_ib_get - request an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ring: ring index the IB is associated with
- * @ib: IB object returned
- * @size: requested IB size
- *
- * Request an IB (all asics). IBs are allocated using the
- * suballocator.
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib *ib, struct radeon_vm *vm,
- unsigned size)
-{
- int r;
-
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
- if (r) {
- dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
- return r;
- }
-
- r = radeon_semaphore_create(rdev, &ib->semaphore);
- if (r) {
- return r;
- }
-
- ib->ring = ring;
- ib->fence = NULL;
- ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
- ib->vm = vm;
- if (vm) {
- /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
- * space and soffset is the offset inside the pool bo
- */
- ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
- } else {
- ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
- }
- ib->is_const_ib = false;
-
- return 0;
-}
-
-/**
- * radeon_ib_free - free an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to free
- *
- * Free an IB (all asics).
- */
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
- radeon_fence_unref(&ib->fence);
-}
-
-/**
- * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- * @const_ib: Const IB to schedule (SI only)
- *
- * Schedule an IB on the associated ring (all asics).
- * Returns 0 on success, error on failure.
- *
- * On SI, there are two parallel engines fed from the primary ring,
- * the CE (Constant Engine) and the DE (Drawing Engine). Since
- * resource descriptors have moved to memory, the CE allows you to
- * prime the caches while the DE is updating register state so that
- * the resource descriptors will be already in cache when the draw is
- * processed. To accomplish this, the userspace driver submits two
- * IBs, one for the CE and one for the DE. If there is a CE IB (called
- * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
- * to SI there was just a DE IB.
- */
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
- struct radeon_ib *const_ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
- int r = 0;
-
- if (!ib->length_dw || !ring->ready) {
- /* TODO: Nothings in the ib we should report. */
- dev_err(rdev->dev, "couldn't schedule ib\n");
- return -EINVAL;
- }
-
- /* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
- if (r) {
- dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
- return r;
- }
-
- /* grab a vm id if necessary */
- if (ib->vm) {
- struct radeon_fence *vm_id_fence;
- vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
- radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
- }
-
- /* sync with other rings */
- r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
- if (r) {
- dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- if (ib->vm)
- radeon_vm_flush(rdev, ib->vm, ib->ring);
-
- if (const_ib) {
- radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
- radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
- }
- radeon_ring_ib_execute(rdev, ib->ring, ib);
- r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
- if (r) {
- dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
- if (const_ib) {
- const_ib->fence = radeon_fence_ref(ib->fence);
- }
-
- if (ib->vm)
- radeon_vm_fence(rdev, ib->vm, ib->fence);
-
- radeon_ring_unlock_commit(rdev, ring);
- return 0;
-}
-
-/**
- * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Initialize the suballocator to manage a pool of memory
- * for use as IBs (all asics).
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_pool_init(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->ib_pool_ready) {
- return 0;
- }
- r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
- RADEON_IB_POOL_SIZE*64*1024,
- RADEON_GPU_PAGE_SIZE,
- RADEON_GEM_DOMAIN_GTT);
- if (r) {
- return r;
- }
-
- r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
- if (r) {
- return r;
- }
-
- rdev->ib_pool_ready = true;
- if (radeon_debugfs_sa_init(rdev)) {
- dev_err(rdev->dev, "failed to register debugfs file for SA\n");
- }
- return 0;
-}
-
-/**
- * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Tear down the suballocator managing the pool of memory
- * for use as IBs (all asics).
- */
-void radeon_ib_pool_fini(struct radeon_device *rdev)
-{
- if (rdev->ib_pool_ready) {
- radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
- radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
- rdev->ib_pool_ready = false;
- }
-}
-
-/**
- * radeon_ib_ring_tests - test IBs on the rings
- *
- * @rdev: radeon_device pointer
- *
- * Test an IB (Indirect Buffer) on each ring.
- * If the test fails, disable the ring.
- * Returns 0 on success, error if the primary GFX ring
- * IB test fails.
- */
-int radeon_ib_ring_tests(struct radeon_device *rdev)
-{
- unsigned i;
- int r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_ring *ring = &rdev->ring[i];
-
- if (!ring->ready)
- continue;
-
- r = radeon_ib_test(rdev, i, ring);
- if (r) {
- ring->ready = false;
- rdev->needs_reset = false;
-
- if (i == RADEON_RING_TYPE_GFX_INDEX) {
- /* oh, oh, that's really bad */
- DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
- rdev->accel_working = false;
- return r;
-
- } else {
- /* still not good, but we can live with it */
- DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
- }
- }
- }
- return 0;
-}
/*
* Rings
@@ -433,11 +183,21 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
*/
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
+ /* If we are emitting the HDP flush via the ring buffer, we need to
+ * do it before padding.
+ */
+ if (rdev->asic->ring[ring->idx]->hdp_flush)
+ rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
mb();
+ /* If we are emitting the HDP flush via MMIO, we need to do it after
+ * all CPU writes to VRAM have finished.
+ */
+ if (rdev->asic->mmio_hdp_flush)
+ rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring);
}
@@ -641,6 +401,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
+ (rdev->flags & RADEON_IS_PCIE) ?
+ RADEON_GEM_GTT_WC : 0,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
@@ -791,22 +553,6 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};
-static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
-
- return 0;
-
-}
-
-static struct drm_info_list radeon_debugfs_sa_list[] = {
- {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
-};
-
#endif
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
@@ -828,12 +574,3 @@ static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ri
#endif
return 0;
}
-
-static int radeon_debugfs_sa_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
-#else
- return 0;
-#endif
-}
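Because radeon_ring_commit() now issues the HDP flush itself (via a ring packet before padding, or via MMIO after the memory barrier), callers keep the usual lock/write/commit pattern. A minimal sketch of such a caller; the helper name and the written dword are placeholders:

/* Sketch: a typical ring submission. With the change above, the HDP flush
 * is handled inside the commit path, so callers are unchanged. */
static int example_ring_emit(struct radeon_device *rdev,
			     struct radeon_ring *ring, u32 dw)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 4);
	if (r)
		return r;

	radeon_ring_write(ring, dw);
	radeon_ring_unlock_commit(rdev, ring);	/* pads, flushes HDP, bumps wptr */
	return 0;
}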
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index adcf3e2f07da..b84f97c8718c 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain)
+ unsigned size, u32 align, u32 domain, u32 flags)
{
int i, r;
@@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
}
r = radeon_bo_create(rdev, size, align, true,
- domain, NULL, &sa_manager->bo);
+ domain, flags, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 3a13e0d1055c..5adf4207453d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -56,13 +56,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- n -= rdev->ring[i].ring_size;
- if (rdev->wb.wb_obj)
- n -= RADEON_GPU_PAGE_SIZE;
- if (rdev->ih.ring_obj)
- n -= rdev->ih.ring_size;
+ n = rdev->mc.gtt_size - rdev->gart_pin_size;
n /= size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
@@ -73,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &vram_obj);
+ 0, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -93,7 +87,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
struct radeon_fence *fence = NULL;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
+ RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f749f2c3bbdb..9db74a96ef61 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -72,8 +72,8 @@ TRACE_EVENT(radeon_vm_bo_update,
),
TP_fast_assign(
- __entry->soffset = bo_va->soffset;
- __entry->eoffset = bo_va->eoffset;
+ __entry->soffset = bo_va->it.start;
+ __entry->eoffset = bo_va->it.last + 1;
__entry->flags = bo_va->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
@@ -104,6 +104,24 @@ TRACE_EVENT(radeon_vm_set_page,
__entry->flags, __entry->count)
);
+TRACE_EVENT(radeon_vm_flush,
+ TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
+ TP_ARGS(pd_addr, ring, id),
+ TP_STRUCT__entry(
+ __field(u64, pd_addr)
+ __field(u32, ring)
+ __field(u32, id)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_addr = pd_addr;
+ __entry->ring = ring;
+ __entry->id = id;
+ ),
+ TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
+ __entry->pd_addr, __entry->ring, __entry->id)
+);
+
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c8a8a5144ec1..72afe82a95c9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -521,6 +521,8 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct radeon_ttm_tt *gtt = (void*)ttm;
+ uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
+ RADEON_GART_PAGE_WRITE;
int r;
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
@@ -528,8 +530,10 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
- r = radeon_gart_bind(gtt->rdev, gtt->offset,
- ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+ if (ttm->caching_state == tt_cached)
+ flags |= RADEON_GART_PAGE_SNOOP;
+ r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
@@ -726,7 +730,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM, 0,
NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.c b/drivers/gpu/drm/radeon/radeon_ucode.c
new file mode 100644
index 000000000000..6beec680390c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ucode.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_ucode.h"
+
+static void radeon_ucode_print_common_hdr(const struct common_firmware_header *hdr)
+{
+ DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
+ DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
+ DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
+ DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
+ DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
+ DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
+ DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
+ DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
+ DRM_DEBUG("ucode_array_offset_bytes: %u\n",
+ le32_to_cpu(hdr->ucode_array_offset_bytes));
+ DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
+}
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("MC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct mc_firmware_header_v1_0 *mc_hdr =
+ container_of(hdr, struct mc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("io_debug_size_bytes: %u\n",
+ le32_to_cpu(mc_hdr->io_debug_size_bytes));
+ DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
+ le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
+ } else {
+ DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("SMC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct smc_firmware_header_v1_0 *smc_hdr =
+ container_of(hdr, struct smc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
+ } else {
+ DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("GFX\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct gfx_firmware_header_v1_0 *gfx_hdr =
+ container_of(hdr, struct gfx_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(gfx_hdr->ucode_feature_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
+ } else {
+ DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("RLC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct rlc_firmware_header_v1_0 *rlc_hdr =
+ container_of(hdr, struct rlc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr->ucode_feature_version));
+ DRM_DEBUG("save_and_restore_offset: %u\n",
+ le32_to_cpu(rlc_hdr->save_and_restore_offset));
+ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+ DRM_DEBUG("master_pkt_description_offset: %u\n",
+ le32_to_cpu(rlc_hdr->master_pkt_description_offset));
+ } else {
+ DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("SDMA\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct sdma_firmware_header_v1_0 *sdma_hdr =
+ container_of(hdr, struct sdma_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_feature_version));
+ DRM_DEBUG("ucode_change_version: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_change_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
+ } else {
+ DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
+ version_major, version_minor);
+ }
+}
+
+int radeon_ucode_validate(const struct firmware *fw)
+{
+ const struct common_firmware_header *hdr =
+ (const struct common_firmware_header *)fw->data;
+
+ if (fw->size == le32_to_cpu(hdr->size_bytes))
+ return 0;
+
+ return -EINVAL;
+}
+
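radeon_ucode_validate() only checks that the firmware size matches the size_bytes field of the common header. A sketch of how a firmware fetch path might use it right after request_firmware(); the firmware file name is a placeholder:

/* Sketch: validate a header-wrapped firmware image right after loading it. */
static int example_load_fw(struct radeon_device *rdev,
			   const struct firmware **fw)
{
	int r;

	r = request_firmware(fw, "radeon/example_fw.bin", rdev->dev);
	if (r)
		return r;

	r = radeon_ucode_validate(*fw);
	if (r) {
		DRM_ERROR("firmware size does not match its header\n");
		release_firmware(*fw);
		*fw = NULL;
	}
	return r;
}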
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 4e7c3269b183..dc4576e4d8ad 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -153,4 +153,75 @@
#define HAWAII_SMC_UCODE_START 0x20000
#define HAWAII_SMC_UCODE_SIZE 0x1FDEC
+struct common_firmware_header {
+ uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
+ uint32_t header_size_bytes; /* size of just the header in bytes */
+ uint16_t header_version_major; /* header version */
+ uint16_t header_version_minor; /* header version */
+ uint16_t ip_version_major; /* IP version */
+ uint16_t ip_version_minor; /* IP version */
+ uint32_t ucode_version;
+ uint32_t ucode_size_bytes; /* size of ucode in bytes */
+ uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
+ uint32_t crc32; /* crc32 checksum of the payload */
+};
+
+/* version_major=1, version_minor=0 */
+struct mc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t io_debug_size_bytes; /* size of debug array in dwords */
+ uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
+};
+
+/* version_major=1, version_minor=0 */
+struct smc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_start_addr;
+};
+
+/* version_major=1, version_minor=0 */
+struct gfx_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t jt_offset; /* jt location */
+ uint32_t jt_size; /* size of jt */
+};
+
+/* version_major=1, version_minor=0 */
+struct rlc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t save_and_restore_offset;
+ uint32_t clear_state_descriptor_offset;
+ uint32_t avail_scratch_ram_locations;
+ uint32_t master_pkt_description_offset;
+};
+
+/* version_major=1, version_minor=0 */
+struct sdma_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t ucode_change_version;
+ uint32_t jt_offset; /* jt location */
+ uint32_t jt_size; /* size of jt */
+};
+
+/* header is fixed size */
+union radeon_firmware_header {
+ struct common_firmware_header common;
+ struct mc_firmware_header_v1_0 mc;
+ struct smc_firmware_header_v1_0 smc;
+ struct gfx_firmware_header_v1_0 gfx;
+ struct rlc_firmware_header_v1_0 rlc;
+ struct sdma_firmware_header_v1_0 sdma;
+ uint8_t raw[0x100];
+};
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
+int radeon_ucode_validate(const struct firmware *fw);
+
#endif
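The versioned headers above are meant to be overlaid on the raw firmware image; ucode_array_offset_bytes then locates the payload. A small sketch for a GFX image, assuming it already passed radeon_ucode_validate():

/* Sketch: locate the ucode payload of a header-wrapped GFX firmware image
 * using the structures above. */
static const __le32 *example_gfx_payload(const struct firmware *fw,
					 u32 *payload_dwords)
{
	const struct gfx_firmware_header_v1_0 *hdr =
		(const struct gfx_firmware_header_v1_0 *)fw->data;

	*payload_dwords = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	return (const __le32 *)(fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));
}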
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index a4ad270e8261..6bf55ec85b62 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -117,7 +117,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@@ -674,7 +674,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
int r, i;
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
@@ -720,7 +720,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
int r, i;
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index aa21c31a846c..f9b70a43aa52 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 725d3669014f..ccae4d9dc3de 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -238,8 +238,8 @@ void radeon_vm_flush(struct radeon_device *rdev,
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
/* if we can't remember our last VM flush then flush now! */
- /* XXX figure out why we have to flush all the time */
- if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
+ if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
+ trace_radeon_vm_flush(pd_addr, ring, vm->id);
vm->pd_gpu_addr = pd_addr;
radeon_ring_vm_flush(rdev, ring, vm);
}
@@ -325,17 +325,15 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->soffset = 0;
- bo_va->eoffset = 0;
+ bo_va->it.start = 0;
+ bo_va->it.last = 0;
bo_va->flags = 0;
- bo_va->valid = false;
+ bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->vm_list);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
- list_add(&bo_va->vm_list, &vm->va);
list_add_tail(&bo_va->bo_list, &bo->va);
mutex_unlock(&vm->mutex);
@@ -343,6 +341,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
}
/**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+ uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+ radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+ } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+ radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+ count, incr, flags);
+
+ } else {
+ radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
* radeon_vm_clear_bo - initially clear the page dir/table
*
* @rdev: radeon_device pointer
@@ -376,14 +410,15 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
addr = radeon_bo_gpu_offset(bo);
entries = radeon_bo_size(bo) / 8;
- r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
- NULL, entries * 2 + 64);
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
if (r)
goto error;
ib.length_dw = 0;
- radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+ radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+ radeon_asic_vm_pad_ib(rdev, &ib);
+ WARN_ON(ib.length_dw > 64);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r)
@@ -419,11 +454,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
uint32_t flags)
{
uint64_t size = radeon_bo_size(bo_va->bo);
- uint64_t eoffset, last_offset = 0;
struct radeon_vm *vm = bo_va->vm;
- struct radeon_bo_va *tmp;
- struct list_head *head;
unsigned last_pfn, pt_idx;
+ uint64_t eoffset;
int r;
if (soffset) {
@@ -445,51 +478,49 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
mutex_lock(&vm->mutex);
- head = &vm->va;
- last_offset = 0;
- list_for_each_entry(tmp, &vm->va, vm_list) {
- if (bo_va == tmp) {
- /* skip over currently modified bo */
- continue;
+ if (bo_va->it.start || bo_va->it.last) {
+ if (bo_va->addr) {
+ /* add a clone of the bo_va to clear the old address */
+ struct radeon_bo_va *tmp;
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ tmp->it.start = bo_va->it.start;
+ tmp->it.last = bo_va->it.last;
+ tmp->vm = vm;
+ tmp->addr = bo_va->addr;
+ tmp->bo = radeon_bo_ref(bo_va->bo);
+ list_add(&tmp->vm_status, &vm->freed);
}
- if (soffset >= last_offset && eoffset <= tmp->soffset) {
- /* bo can be added before this one */
- break;
- }
- if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
- /* bo and tmp overlap, invalid offset */
- dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
- bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
- (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
- mutex_unlock(&vm->mutex);
- return -EINVAL;
- }
- last_offset = tmp->eoffset;
- head = &tmp->vm_list;
+ interval_tree_remove(&bo_va->it, &vm->va);
+ bo_va->it.start = 0;
+ bo_va->it.last = 0;
}
- if (bo_va->soffset) {
- /* add a clone of the bo_va to clear the old address */
- tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (!tmp) {
+ soffset /= RADEON_GPU_PAGE_SIZE;
+ eoffset /= RADEON_GPU_PAGE_SIZE;
+ if (soffset || eoffset) {
+ struct interval_tree_node *it;
+ it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+ if (it) {
+ struct radeon_bo_va *tmp;
+ tmp = container_of(it, struct radeon_bo_va, it);
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
+ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+ soffset, tmp->bo, tmp->it.start, tmp->it.last);
mutex_unlock(&vm->mutex);
- return -ENOMEM;
+ return -EINVAL;
}
- tmp->soffset = bo_va->soffset;
- tmp->eoffset = bo_va->eoffset;
- tmp->vm = vm;
- list_add(&tmp->vm_status, &vm->freed);
+ bo_va->it.start = soffset;
+ bo_va->it.last = eoffset - 1;
+ interval_tree_insert(&bo_va->it, &vm->va);
}
- bo_va->soffset = soffset;
- bo_va->eoffset = eoffset;
bo_va->flags = flags;
- bo_va->valid = false;
- list_move(&bo_va->vm_list, head);
+ bo_va->addr = 0;
- soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
- eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
+ soffset >>= radeon_vm_block_size;
+ eoffset >>= radeon_vm_block_size;
BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
@@ -510,7 +541,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
if (r)
return r;
@@ -611,7 +642,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
ndw = 64;
/* assume the worst case */
- ndw += vm->max_pde_used * 16;
+ ndw += vm->max_pde_used * 6;
/* update too big for an IB */
if (ndw > 0xfffff)
@@ -640,9 +671,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
((last_pt + incr * count) != pt)) {
if (count) {
- radeon_asic_vm_set_page(rdev, &ib, last_pde,
- last_pt, count, incr,
- R600_PTE_VALID);
+ radeon_vm_set_pages(rdev, &ib, last_pde,
+ last_pt, count, incr,
+ R600_PTE_VALID);
}
count = 1;
@@ -654,12 +685,14 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
}
if (count)
- radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
- incr, R600_PTE_VALID);
+ radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
+ incr, R600_PTE_VALID);
if (ib.length_dw != 0) {
+ radeon_asic_vm_pad_ib(rdev, &ib);
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+ WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@@ -725,30 +758,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
(frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
return;
}
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
addr += RADEON_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+ radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += RADEON_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
- radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
}
}
@@ -777,9 +810,6 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
unsigned count = 0;
uint64_t addr;
- start = start / RADEON_GPU_PAGE_SIZE;
- end = end / RADEON_GPU_PAGE_SIZE;
-
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> radeon_vm_block_size;
@@ -842,55 +872,73 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
{
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
- unsigned nptes, ndw;
+ unsigned nptes, ncmds, ndw;
uint64_t addr;
+ uint32_t flags;
int r;
-
- if (!bo_va->soffset) {
+ if (!bo_va->it.start) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
bo_va->bo, vm);
return -EINVAL;
}
- if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
- return 0;
+ list_del_init(&bo_va->vm_status);
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
- bo_va->valid = true;
}
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
+ bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
+
} else {
addr += rdev->vm_manager.vram_base_offset;
}
} else {
addr = 0;
- bo_va->valid = false;
}
+ if (addr == bo_va->addr)
+ return 0;
+ bo_va->addr = addr;
+
trace_radeon_vm_bo_update(bo_va);
- nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
+ nptes = bo_va->it.last - bo_va->it.start + 1;
+
+ /* reserve space for one command every (1 << BLOCK_SIZE) entries
+ or 2k dwords (whichever is smaller) */
+ ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
/* padding, etc. */
ndw = 64;
- if (radeon_vm_block_size > 11)
- /* reserve space for one header for every 2k dwords */
- ndw += (nptes >> 11) * 4;
- else
- /* reserve space for one header for
- every (1 << BLOCK_SIZE) entries */
- ndw += (nptes >> radeon_vm_block_size) * 4;
+ flags = radeon_vm_page_flags(bo_va->flags);
+ if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+ /* only copy commands needed */
+ ndw += ncmds * 7;
- /* reserve space for pte addresses */
- ndw += nptes * 2;
+ } else if (flags & R600_PTE_SYSTEM) {
+ /* header for write data commands */
+ ndw += ncmds * 4;
+
+ /* body of write data command */
+ ndw += nptes * 2;
+
+ } else {
+ /* set page commands needed */
+ ndw += ncmds * 10;
+
+ /* two extra commands for begin/end of fragment */
+ ndw += 2 * 10;
+ }
/* update too big for an IB */
if (ndw > 0xfffff)
@@ -901,8 +949,12 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return r;
ib.length_dw = 0;
- radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
- addr, radeon_vm_page_flags(bo_va->flags));
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+ bo_va->it.last + 1, addr,
+ radeon_vm_page_flags(bo_va->flags));
+
+ radeon_asic_vm_pad_ib(rdev, &ib);
+ WARN_ON(ib.length_dw > ndw);
radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
@@ -936,8 +988,8 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
int r;
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
- list_del(&bo_va->vm_status);
r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ radeon_bo_unref(&bo_va->bo);
kfree(bo_va);
if (r)
return r;
@@ -947,6 +999,31 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
}
/**
+ * radeon_vm_clear_invalids - clear invalidated BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all invalidated BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+ r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+/**
* radeon_vm_bo_rmv - remove a bo to a specific vm
*
* @rdev: radeon_device pointer
@@ -964,10 +1041,11 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
list_del(&bo_va->bo_list);
mutex_lock(&vm->mutex);
- list_del(&bo_va->vm_list);
+ interval_tree_remove(&bo_va->it, &vm->va);
+ list_del(&bo_va->vm_status);
- if (bo_va->soffset) {
- bo_va->bo = NULL;
+ if (bo_va->addr) {
+ bo_va->bo = radeon_bo_ref(bo_va->bo);
list_add(&bo_va->vm_status, &vm->freed);
} else {
kfree(bo_va);
@@ -991,7 +1069,12 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- bo_va->valid = false;
+ if (bo_va->addr) {
+ mutex_lock(&bo_va->vm->mutex);
+ list_del(&bo_va->vm_status);
+ list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
+ mutex_unlock(&bo_va->vm->mutex);
+ }
}
}
@@ -1016,7 +1099,8 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
- INIT_LIST_HEAD(&vm->va);
+ vm->va = RB_ROOT;
+ INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
@@ -1031,7 +1115,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
}
r = radeon_bo_create(rdev, pd_size, align, true,
- RADEON_GEM_DOMAIN_VRAM, NULL,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
&vm->page_directory);
if (r)
return r;
@@ -1060,11 +1144,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int i, r;
- if (!list_empty(&vm->va)) {
+ if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
- list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
- list_del_init(&bo_va->vm_list);
+ rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
+ interval_tree_remove(&bo_va->it, &vm->va);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
@@ -1072,8 +1156,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va);
}
}
- list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ radeon_bo_unref(&bo_va->bo);
kfree(bo_va);
+ }
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
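The radeon_vm.c hunks above replace the driver's linear vm_list scan with the kernel's generic interval tree: vm->va becomes an rb_root and each bo_va carries an interval_tree_node whose start/last fields hold the mapped page range. A minimal standalone sketch of that pattern, using the same interval_tree helpers but with illustrative struct and function names that are not part of the driver, could look like:

#include <linux/interval_tree.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Illustrative mapping record: one allocated range inside a VM address space. */
struct demo_mapping {
	struct interval_tree_node it;	/* start/last are inclusive page numbers */
};

/* Insert [start, last] unless it overlaps an existing mapping. */
static int demo_map_range(struct rb_root *va, unsigned long start,
			  unsigned long last)
{
	struct demo_mapping *m;

	/* interval_tree_iter_first() returns any node overlapping [start, last] */
	if (interval_tree_iter_first(va, start, last))
		return -EINVAL;	/* conflict, analogous to the dev_err path above */

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;

	m->it.start = start;
	m->it.last = last;
	interval_tree_insert(&m->it, va);
	return 0;
}

Overlap checks become logarithmic tree lookups instead of a walk over every mapping in the VM, which is what lets the patch drop the list_for_each_entry() conflict scan in radeon_vm_bo_set_addr().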
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a0f96decece3..6c1fc339d228 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -109,7 +109,6 @@ int rs400_gart_enable(struct radeon_device *rdev)
uint32_t size_reg;
uint32_t tmp;
- radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -209,17 +208,24 @@ void rs400_gart_fini(struct radeon_device *rdev)
radeon_gart_table_ram_free(rdev);
}
+#define RS400_PTE_UNSNOOPED (1 << 0)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr, uint32_t flags)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
- ((upper_32_bits(addr) & 0xff) << 4) |
- RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
+ ((upper_32_bits(addr) & 0xff) << 4);
+ if (flags & RADEON_GART_PAGE_READ)
+ entry |= RS400_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ entry |= RS400_PTE_WRITEABLE;
+ if (!(flags & RADEON_GART_PAGE_SNOOP))
+ entry |= RS400_PTE_UNSNOOPED;
entry = cpu_to_le32(entry);
gtt[i] = entry;
}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d1a35cb1c91d..5f6db4629aaa 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -555,7 +555,6 @@ static int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
@@ -626,15 +625,21 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr, uint32_t flags)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- if (addr == rdev->dummy_page.addr)
- addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- else
- addr |= R600_PTE_GART;
+ addr |= R600_PTE_SYSTEM;
+ if (flags & RADEON_GART_PAGE_VALID)
+ addr |= R600_PTE_VALID;
+ if (flags & RADEON_GART_PAGE_READ)
+ addr |= R600_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ addr |= R600_PTE_WRITEABLE;
+ if (flags & RADEON_GART_PAGE_SNOOP)
+ addr |= R600_PTE_SNOOPED;
writeq(addr, ptr + (i * 8));
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index da8703d8d455..2983f17ea1b3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -900,7 +900,6 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 9e854fd016da..011779bd2b3d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -42,6 +42,14 @@ MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
@@ -49,6 +57,14 @@ MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
@@ -56,6 +72,14 @@ MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
@@ -63,6 +87,14 @@ MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
@@ -71,6 +103,13 @@ MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_mc.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
@@ -1470,38 +1509,54 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
/* ucode loading */
int si_mc_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
+ const __be32 *fw_data = NULL;
+ const __le32 *new_fw_data = NULL;
u32 running, blackout = 0;
- u32 *io_mc_regs;
+ u32 *io_mc_regs = NULL;
+ const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
- ucode_size = rdev->mc_fw->size / 4;
+ if (rdev->new_fw) {
+ const struct mc_firmware_header_v1_0 *hdr =
+ (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
+
+ radeon_ucode_print_mc_hdr(&hdr->header);
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ ucode_size = rdev->mc_fw->size / 4;
- switch (rdev->family) {
- case CHIP_TAHITI:
- io_mc_regs = (u32 *)&tahiti_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_PITCAIRN:
- io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_VERDE:
- default:
- io_mc_regs = (u32 *)&verde_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_OLAND:
- io_mc_regs = (u32 *)&oland_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_HAINAN:
- io_mc_regs = (u32 *)&hainan_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ io_mc_regs = (u32 *)&tahiti_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_VERDE:
+ default:
+ io_mc_regs = (u32 *)&verde_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_OLAND:
+ io_mc_regs = (u32 *)&oland_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAINAN:
+ io_mc_regs = (u32 *)&hainan_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ }
+ fw_data = (const __be32 *)rdev->mc_fw->data;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1518,13 +1573,21 @@ int si_mc_load_microcode(struct radeon_device *rdev)
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
- WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ if (rdev->new_fw) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ } else {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
}
/* load the MC ucode */
- fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < ucode_size; i++)
- WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ for (i = 0; i < ucode_size; i++) {
+ if (rdev->new_fw)
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ else
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ }
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
@@ -1553,18 +1616,19 @@ int si_mc_load_microcode(struct radeon_device *rdev)
static int si_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
- const char *rlc_chip_name;
+ const char *new_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
size_t smc_req_size, mc2_req_size;
char fw_name[30];
int err;
+ int new_fw = 0;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
- rlc_chip_name = "TAHITI";
+ new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1575,7 +1639,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
- rlc_chip_name = "PITCAIRN";
+ new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1586,7 +1650,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
- rlc_chip_name = "VERDE";
+ new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1597,7 +1661,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
- rlc_chip_name = "OLAND";
+ new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1607,7 +1671,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
- rlc_chip_name = "HAINAN";
+ new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1618,86 +1682,178 @@ static int si_init_microcode(struct radeon_device *rdev)
default: BUG();
}
- DRM_INFO("Loading %s Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", new_chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
- err = -EINVAL;
- goto out;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->pfp_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->me_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->ce_fw->size != ce_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->ce_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->ce_fw->size != ce_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->ce_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
- "si_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "si_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->rlc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ }
+ if ((rdev->mc_fw->size != mc_req_size) &&
+ (rdev->mc_fw->size != mc2_req_size)) {
+ printk(KERN_ERR
+ "si_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+ } else {
+ err = radeon_ucode_validate(rdev->mc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
goto out;
+ } else {
+ new_fw++;
+ }
}
- if ((rdev->mc_fw->size != mc_req_size) &&
- (rdev->mc_fw->size != mc2_req_size)) {
- printk(KERN_ERR
- "si_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "si_smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "si_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->smc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
+ if (new_fw == 0) {
+ rdev->new_fw = false;
+ } else if (new_fw < 6) {
+ printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
+ err = -EINVAL;
+ } else {
+ rdev->new_fw = true;
+ }
out:
if (err) {
if (err != -EINVAL)
@@ -3282,34 +3438,77 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
static int si_cp_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
- if (!rdev->me_fw || !rdev->pfp_fw)
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
return -EINVAL;
si_cp_enable(rdev, false);
- /* PFP */
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
- WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __be32 *)rdev->ce_fw->data;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < SI_CE_UCODE_SIZE; i++)
- WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
- fw_data = (const __be32 *)rdev->me_fw->data;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
- WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *pfp_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ const struct gfx_firmware_header_v1_0 *ce_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ const struct gfx_firmware_header_v1_0 *me_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+ radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+ radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < SI_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ }
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
@@ -4048,7 +4247,6 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
@@ -4815,7 +5013,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
@@ -5592,7 +5790,6 @@ static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
static int si_rlc_resume(struct radeon_device *rdev)
{
u32 i;
- const __be32 *fw_data;
if (!rdev->rlc_fw)
return -EINVAL;
@@ -5615,10 +5812,26 @@ static int si_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- fw_data = (const __be32 *)rdev->rlc_fw->data;
- for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ if (rdev->new_fw) {
+ const struct rlc_firmware_header_v1_0 *hdr =
+ (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+ u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ const __le32 *fw_data = (const __le32 *)
+ (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ radeon_ucode_print_rlc_hdr(&hdr->header);
+
+ for (i = 0; i < fw_size; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+ }
+ } else {
+ const __be32 *fw_data =
+ (const __be32 *)rdev->rlc_fw->data;
+ for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
}
WREG32(RLC_UCODE_ADDR, 0);
@@ -6318,7 +6531,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
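The si.c hunks above add a second loading path that parses the new lower-case firmware images through their headers instead of relying on hard-coded per-ASIC sizes. A rough sketch of that header-driven lookup, using a cut-down illustrative header struct rather than the real radeon_ucode.h definitions, might look like:

#include <linux/kernel.h>
#include <linux/firmware.h>

/*
 * Illustrative view of the common header layout used by the new firmware
 * images: only the two fields the loaders above actually consume are shown;
 * the real structs live in radeon_ucode.h and carry more metadata.
 */
struct demo_fw_header {
	__le32 ucode_size_bytes;		/* size of the ucode payload */
	__le32 ucode_array_offset_bytes;	/* offset of the payload in the blob */
};

/* Return the payload start and dword count for a loaded firmware blob. */
static const __le32 *demo_fw_payload(const struct firmware *fw, u32 *dwords)
{
	const struct demo_fw_header *hdr = (const void *)fw->data;

	*dwords = le32_to_cpu(hdr->ucode_size_bytes) / 4;
	return (const __le32 *)(fw->data +
				le32_to_cpu(hdr->ucode_array_offset_bytes));
}

Because the header records both the payload offset and its size, the loader no longer needs per-ASIC *_UCODE_SIZE constants for the new images, which is why the old switch statements only survive in the legacy (big-endian) branches.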
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index e24c94b6d14d..716505129450 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -56,7 +56,41 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
- * si_dma_vm_set_page - update the page tables using the DMA
+ * si_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (SI).
+ */
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0xFFFF8)
+ bytes = 0xFFFF8;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, bytes);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+}
+
+/**
+ * si_dma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@@ -66,83 +100,89 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the DMA (SI).
+ * Update PTEs by writing them manually using the DMA (SI).
*/
-void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if (flags == R600_PTE_GART) {
- uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0xFFFF8)
- bytes = 0xFFFF8;
-
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
- 1, 0, 0, bytes);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
- } else if (flags & R600_PTE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & R600_PTE_VALID)
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
}
}
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
}
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 58918868f894..70e61ffeace2 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3812,6 +3812,27 @@ void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
voltage_table->count = max_voltage_steps;
}
+static int si_get_svi2_voltage_table(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
static int si_construct_voltage_tables(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
@@ -3819,15 +3840,25 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
struct si_power_info *si_pi = si_get_pi(rdev);
int ret;
- ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
- if (ret)
- return ret;
+ if (pi->voltage_control) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
- if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &eg_pi->vddc_voltage_table);
+ if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddc_voltage_table);
+ } else if (si_pi->voltage_control_svi2) {
+ ret = si_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
if (eg_pi->vddci_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
@@ -3840,6 +3871,13 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
SISLANDS_MAX_NO_VREG_STEPS,
&eg_pi->vddci_voltage_table);
}
+ if (si_pi->vddci_control_svi2) {
+ ret = si_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &eg_pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
if (pi->mvdd_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
@@ -3893,46 +3931,55 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
struct si_power_info *si_pi = si_get_pi(rdev);
u8 i;
- if (eg_pi->vddc_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
-
- for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
- if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
- table->maxVDDCIndexInPPTable = i;
- break;
+ if (si_pi->voltage_control_svi2) {
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+ si_pi->svc_gpio_id);
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+ si_pi->svd_gpio_id);
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+ 2);
+ } else {
+ if (eg_pi->vddc_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+ for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+ if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+ table->maxVDDCIndexInPPTable = i;
+ break;
+ }
}
}
- }
- if (eg_pi->vddci_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
+ if (eg_pi->vddci_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
- }
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ }
- if (si_pi->mvdd_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
+ if (si_pi->mvdd_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
- cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
- }
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+ cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+ }
- if (si_pi->vddc_phase_shed_control) {
- if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
- &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
- si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
+ if (si_pi->vddc_phase_shed_control) {
+ if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+ si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
- table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+ table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
- si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
- (u32)si_pi->vddc_phase_shed_table.phase_delay);
- } else {
- si_pi->vddc_phase_shed_control = false;
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+ (u32)si_pi->vddc_phase_shed_table.phase_delay);
+ } else {
+ si_pi->vddc_phase_shed_control = false;
+ }
}
}
@@ -5798,16 +5845,17 @@ int si_dpm_enable(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct si_power_info *si_pi = si_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
if (si_is_smc_running(rdev))
return -EINVAL;
- if (pi->voltage_control)
+ if (pi->voltage_control || si_pi->voltage_control_svi2)
si_enable_voltage_control(rdev, true);
if (pi->mvdd_control)
si_get_mvdd_configuration(rdev);
- if (pi->voltage_control) {
+ if (pi->voltage_control || si_pi->voltage_control_svi2) {
ret = si_construct_voltage_tables(rdev);
if (ret) {
DRM_ERROR("si_construct_voltage_tables failed\n");
@@ -6406,16 +6454,32 @@ int si_dpm_init(struct radeon_device *rdev)
ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
pi->voltage_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!pi->voltage_control) {
+ si_pi->voltage_control_svi2 =
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_SVID2);
+ if (si_pi->voltage_control_svi2)
+ radeon_atom_get_svi2_info(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+ }
pi->mvdd_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
eg_pi->vddci_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!eg_pi->vddci_control)
+ si_pi->vddci_control_svi2 =
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_SVID2);
si_pi->vddc_phase_shed_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_PHASE_LUT);
rv770_get_engine_memory_ss(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index 4ce5032cdf49..8b5c06a0832d 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -170,6 +170,8 @@ struct si_power_info {
bool vddc_phase_shed_control;
bool pspp_notify_required;
bool sclk_deep_sleep_above_low;
+ bool voltage_control_svi2;
+ bool vddci_control_svi2;
/* smc offsets */
u32 sram_end;
u32 state_table_start;
@@ -192,6 +194,9 @@ struct si_power_info {
SMC_SIslands_MCRegisters smc_mc_reg_table;
SISLANDS_SMC_STATETABLE smc_statetable;
PP_SIslands_PAPMParameters papm_parm;
+ /* SVI2 */
+ u8 svd_gpio_id;
+ u8 svc_gpio_id;
};
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index e80efcf0c230..73dbc79c959d 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -219,36 +219,48 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
if (!rdev->smc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_TAHITI:
- ucode_start_address = TAHITI_SMC_UCODE_START;
- ucode_size = TAHITI_SMC_UCODE_SIZE;
- break;
- case CHIP_PITCAIRN:
- ucode_start_address = PITCAIRN_SMC_UCODE_START;
- ucode_size = PITCAIRN_SMC_UCODE_SIZE;
- break;
- case CHIP_VERDE:
- ucode_start_address = VERDE_SMC_UCODE_START;
- ucode_size = VERDE_SMC_UCODE_SIZE;
- break;
- case CHIP_OLAND:
- ucode_start_address = OLAND_SMC_UCODE_START;
- ucode_size = OLAND_SMC_UCODE_SIZE;
- break;
- case CHIP_HAINAN:
- ucode_start_address = HAINAN_SMC_UCODE_START;
- ucode_size = HAINAN_SMC_UCODE_SIZE;
- break;
- default:
- DRM_ERROR("unknown asic in smc ucode loader\n");
- BUG();
+ if (rdev->new_fw) {
+ const struct smc_firmware_header_v1_0 *hdr =
+ (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+ radeon_ucode_print_smc_hdr(&hdr->header);
+
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ ucode_start_address = TAHITI_SMC_UCODE_START;
+ ucode_size = TAHITI_SMC_UCODE_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ ucode_start_address = PITCAIRN_SMC_UCODE_START;
+ ucode_size = PITCAIRN_SMC_UCODE_SIZE;
+ break;
+ case CHIP_VERDE:
+ ucode_start_address = VERDE_SMC_UCODE_START;
+ ucode_size = VERDE_SMC_UCODE_SIZE;
+ break;
+ case CHIP_OLAND:
+ ucode_start_address = OLAND_SMC_UCODE_START;
+ ucode_size = OLAND_SMC_UCODE_SIZE;
+ break;
+ case CHIP_HAINAN:
+ ucode_start_address = HAINAN_SMC_UCODE_START;
+ ucode_size = HAINAN_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+ src = (const u8 *)rdev->smc_fw->data;
}
if (ucode_size & 3)
return -EINVAL;
- src = (const u8 *)rdev->smc_fw->data;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 10e945a49479..623a0b1e2d9d 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -241,6 +241,9 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 792fd1d20e86..fda64b7b73e8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -187,7 +187,7 @@ static struct drm_driver rcar_du_driver = {
* Power management
*/
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index a87edfac111f..76026104d000 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -135,7 +135,9 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
{
struct rcar_du_device *rcdu = dev->dev_private;
const struct rcar_du_format_info *format;
+ unsigned int max_pitch;
unsigned int align;
+ unsigned int bpp;
format = rcar_du_format_info(mode_cmd->pixel_format);
if (format == NULL) {
@@ -144,13 +146,20 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
+ /*
+ * The pitch and alignment constraints are expressed in pixels on the
+ * hardware side and in bytes in the DRM API.
+ */
+ bpp = format->planes == 2 ? 1 : format->bpp / 8;
+ max_pitch = 4096 * bpp;
+
if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
align = 128;
else
- align = 16 * format->bpp / 8;
+ align = 16 * bpp;
if (mode_cmd->pitches[0] & (align - 1) ||
- mode_cmd->pitches[0] >= 8192) {
+ mode_cmd->pitches[0] >= max_pitch) {
dev_dbg(dev->dev, "invalid pitch value %u\n",
mode_cmd->pitches[0]);
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 289048d1c7b2..21426bd234eb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -64,7 +64,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -105,7 +105,7 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_sysfs_connector_add(connector);
+ ret = drm_connector_register(connector);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index ccfe64c7188f..8af3944d31b9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -32,7 +32,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -70,7 +70,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_sysfs_connector_add(connector);
+ ret = drm_connector_register(connector);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index faf176b2daf9..47875de89010 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -692,7 +692,7 @@ static void shmob_drm_connector_destroy(struct drm_connector *connector)
struct shmob_drm_connector *scon = to_shmob_connector(connector);
shmob_drm_backlight_exit(scon);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -726,7 +726,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_sysfs_connector_add(connector);
+ ret = drm_connector_register(connector);
if (ret < 0)
goto err_cleanup;
@@ -749,7 +749,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
err_backlight:
shmob_drm_backlight_exit(&sdev->connector);
err_sysfs:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
err_cleanup:
drm_connector_cleanup(connector);
return ret;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 82c84c7fd4f6..ff4ba483b602 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -297,7 +297,7 @@ static struct drm_driver shmob_drm_driver = {
* Power management
*/
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
static int shmob_drm_pm_suspend(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
new file mode 100644
index 000000000000..2d9d4252d598
--- /dev/null
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -0,0 +1,14 @@
+config DRM_STI
+ tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
+ depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ help
+ Choose this option to enable DRM on the STM stiH41x chipset
+
+config DRM_STI_FBDEV
+ bool "DRM frame buffer device for STMicroelectronics SoC stiH41x Serie"
+ depends on DRM_STI
+ help
+ Choose this option to enable FBDEV on top of DRM for the STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
new file mode 100644
index 000000000000..04ac2ceef27f
--- /dev/null
+++ b/drivers/gpu/drm/sti/Makefile
@@ -0,0 +1,21 @@
+sticompositor-y := \
+ sti_layer.o \
+ sti_mixer.o \
+ sti_gdp.o \
+ sti_vid.o \
+ sti_compositor.o \
+ sti_drm_crtc.o \
+ sti_drm_plane.o
+
+stihdmi-y := sti_hdmi.o \
+ sti_hdmi_tx3g0c55phy.o \
+ sti_hdmi_tx3g4c28phy.o \
+
+obj-$(CONFIG_DRM_STI) = \
+ sti_vtg.o \
+ sti_vtac.o \
+ stihdmi.o \
+ sti_hda.o \
+ sti_tvout.o \
+ sticompositor.o \
+ sti_drm_drv.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/sti/NOTES b/drivers/gpu/drm/sti/NOTES
new file mode 100644
index 000000000000..57e257969198
--- /dev/null
+++ b/drivers/gpu/drm/sti/NOTES
@@ -0,0 +1,58 @@
+1. stiH display hardware IP
+---------------------------
+The STMicroelectronics stiH SoCs use a common chain of HW display IP blocks:
+- The High Quality Video Display Processor (HQVDP) gets video frames from a
+ video decoder and does high quality video processing, including scaling.
+
+- The Compositor is a multiplane, dual-mixer (Main & Aux) digital processor. It
+ has several inputs:
+ - The graphics planes are internally processed by the Generic Display
+ Pipeline (GDP).
+ - The video plug (VID) connects to the HQVDP output.
+ - The cursor handles ... a cursor.
+- The TV OUT pre-formats (convert, clip, round) the compositor output data
+- The HDMI / DVO / HD Analog / SD analog IP builds the video signals
+ - DVO (Digital Video Output) handles a 24-bit parallel signal
+ - The HD analog signal is typically driven by a YCbCr cable, supporting up to
+ 1080i mode.
+ - The SD analog signal is typically used for legacy TV
+- The VTG (Video Timing Generators) build Vsync signals used by the other HW IP
+Note that some stiH drivers support only a subset of these HW IPs.
+
+ .-------------. .-----------. .-----------.
+GPU >-------------+GDP Main | | +---+ HDMI +--> HDMI
+GPU >-------------+GDP mixer+---+ | :===========:
+GPU >-------------+Cursor | | +---+ DVO +--> 24b//
+ ------- | COMPOSITOR | | TV OUT | :===========:
+ | | | | | +---+ HD analog +--> YCbCr
+Vid >--+ HQVDP +--+VID Aux +---+ | :===========:
+dec | | | mixer| | +---+ SD analog +--> CVBS
+ '-------' '-------------' '-----------' '-----------'
+ .-----------.
+ | main+--> Vsync
+ | VTG |
+ | aux+--> Vsync
+ '-----------'
+
+2. DRM / HW mapping
+-------------------
+These IP are mapped to the DRM objects as follows:
+- The CRTCs are mapped to the Compositor Main and Aux Mixers
+- The Framebuffers and planes are mapped to the Compositor GDP (non video
+ buffers) and to HQVDP+VID (video buffers)
+- The Cursor is mapped to the Compositor Cursor
+- The Encoders are mapped to the TVOut
+- The Bridges/Connectors are mapped to the HDMI / DVO / HD Analog / SD analog
+
+FB & planes Cursor CRTC Encoders Bridges/Connectors
+ | | | | |
+ | | | | |
+ | .-------------. | .-----------. .-----------. |
+ +------------> |GDP | Main | | | +-> | | HDMI | <-+
+ +------------> |GDP v mixer|<+ | | | :===========: |
+ | |Cursor | | | +-> | | DVO | <-+
+ | ------- | COMPOSITOR | | |TV OUT | | :===========: |
+ | | | | | | | +-> | | HD analog | <-+
+ +-> | HQVDP | |VID Aux |<+ | | | :===========: |
+ | | | mixer| | +-> | | SD analog | <-+
+ '-------' '-------------' '-----------' '-----------'
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
new file mode 100644
index 000000000000..390d93e9a06c
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+
+#include "sti_compositor.h"
+#include "sti_drm_crtc.h"
+#include "sti_drm_drv.h"
+#include "sti_drm_plane.h"
+#include "sti_gdp.h"
+#include "sti_vtg.h"
+
+/*
+ * stiH407 compositor properties
+ */
+struct sti_compositor_data stih407_compositor_data = {
+ .nb_subdev = 6,
+ .subdev_desc = {
+ {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
+ {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
+ {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
+ {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
+ {STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
+ {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
+ },
+};
+
+/*
+ * stiH416 compositor properties
+ * Note:
+ * on stiH416 the MIXER_AUX has a different base address from the MIXER_MAIN.
+ * Moreover, the GDPs differ between the Main and Aux mixers, so this subdev
+ * map does not fit stiH416 if the MIXER_AUX is to be enabled.
+ */
+struct sti_compositor_data stih416_compositor_data = {
+ .nb_subdev = 3,
+ .subdev_desc = {
+ {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
+ {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
+ {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
+ },
+};
+
+static int sti_compositor_init_subdev(struct sti_compositor *compo,
+ struct sti_compositor_subdev_descriptor *desc,
+ unsigned int array_size)
+{
+ unsigned int i, mixer_id = 0, layer_id = 0;
+
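+ /* Instantiate a mixer or a layer for each subdev descriptor entry */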
+ for (i = 0; i < array_size; i++) {
+ switch (desc[i].type) {
+ case STI_MIXER_MAIN_SUBDEV:
+ case STI_MIXER_AUX_SUBDEV:
+ compo->mixer[mixer_id++] =
+ sti_mixer_create(compo->dev, desc[i].id,
+ compo->regs + desc[i].offset);
+ break;
+ case STI_GPD_SUBDEV:
+ case STI_VID_SUBDEV:
+ compo->layer[layer_id++] =
+ sti_layer_create(compo->dev, desc[i].id,
+ compo->regs + desc[i].offset);
+ break;
+ /* case STI_CURSOR_SUBDEV : TODO */
+ default:
+ DRM_ERROR("Unknow subdev compoment type\n");
+ return 1;
+ }
+
+ }
+ compo->nb_mixers = mixer_id;
+ compo->nb_layers = layer_id;
+
+ return 0;
+}
+
+static int sti_compositor_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ unsigned int i, crtc = 0, plane = 0;
+ struct sti_drm_private *dev_priv = drm_dev->dev_private;
+ struct drm_plane *cursor = NULL;
+ struct drm_plane *primary = NULL;
+
+ dev_priv->compo = compo;
+
+ for (i = 0; i < compo->nb_layers; i++) {
+ if (compo->layer[i]) {
+ enum sti_layer_desc desc = compo->layer[i]->desc;
+ enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
+ enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
+
+ if (compo->mixer[crtc])
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
+
+ switch (type) {
+ case STI_CUR:
+ cursor = sti_drm_plane_init(drm_dev,
+ compo->layer[i],
+ (1 << crtc) - 1,
+ DRM_PLANE_TYPE_CURSOR);
+ break;
+ case STI_GDP:
+ case STI_VID:
+ primary = sti_drm_plane_init(drm_dev,
+ compo->layer[i],
+ (1 << crtc) - 1, plane_type);
+ plane++;
+ break;
+ case STI_BCK:
+ break;
+ }
+
+ /* The first planes are reserved for primary planes */
+ if (compo->mixer[crtc]) {
+ sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
+ primary, cursor);
+ crtc++;
+ cursor = NULL;
+ }
+ }
+ }
+
+ drm_vblank_init(drm_dev, crtc);
+ /* Allow usage of vblank without having to call drm_irq_install */
+ drm_dev->irq_enabled = 1;
+
+ DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
+ crtc, plane);
+ DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
+
+ return 0;
+}
+
+static void sti_compositor_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_compositor_ops = {
+ .bind = sti_compositor_bind,
+ .unbind = sti_compositor_unbind,
+};
+
+static const struct of_device_id compositor_of_match[] = {
+ {
+ .compatible = "st,stih416-compositor",
+ .data = &stih416_compositor_data,
+ }, {
+ .compatible = "st,stih407-compositor",
+ .data = &stih407_compositor_data,
+ }, {
+ /* end node */
+ }
+};
+MODULE_DEVICE_TABLE(of, compositor_of_match);
+
+static int sti_compositor_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *vtg_np;
+ struct sti_compositor *compo;
+ struct resource *res;
+ int err;
+
+ compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
+ if (!compo) {
+ DRM_ERROR("Failed to allocate compositor context\n");
+ return -ENOMEM;
+ }
+ compo->dev = dev;
+ compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
+
+ /* populate data structure depending on compatibility */
+ BUG_ON(!of_match_node(compositor_of_match, np)->data);
+
+ memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
+ sizeof(struct sti_compositor_data));
+
+ /* Get memory resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ DRM_ERROR("Get memory resource failed\n");
+ return -ENXIO;
+ }
+ compo->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (compo->regs == NULL) {
+ DRM_ERROR("Register mapping failed\n");
+ return -ENXIO;
+ }
+
+ /* Get clock resources */
+ compo->clk_compo_main = devm_clk_get(dev, "compo_main");
+ if (IS_ERR(compo->clk_compo_main)) {
+ DRM_ERROR("Cannot get compo_main clock\n");
+ return PTR_ERR(compo->clk_compo_main);
+ }
+
+ compo->clk_compo_aux = devm_clk_get(dev, "compo_aux");
+ if (IS_ERR(compo->clk_compo_aux)) {
+ DRM_ERROR("Cannot get compo_aux clock\n");
+ return PTR_ERR(compo->clk_compo_aux);
+ }
+
+ compo->clk_pix_main = devm_clk_get(dev, "pix_main");
+ if (IS_ERR(compo->clk_pix_main)) {
+ DRM_ERROR("Cannot get pix_main clock\n");
+ return PTR_ERR(compo->clk_pix_main);
+ }
+
+ compo->clk_pix_aux = devm_clk_get(dev, "pix_aux");
+ if (IS_ERR(compo->clk_pix_aux)) {
+ DRM_ERROR("Cannot get pix_aux clock\n");
+ return PTR_ERR(compo->clk_pix_aux);
+ }
+
+ /* Get reset resources */
+ compo->rst_main = devm_reset_control_get(dev, "compo-main");
+ /* Take compo main out of reset */
+ if (!IS_ERR(compo->rst_main))
+ reset_control_deassert(compo->rst_main);
+
+ compo->rst_aux = devm_reset_control_get(dev, "compo-aux");
+ /* Take compo aux out of reset */
+ if (!IS_ERR(compo->rst_aux))
+ reset_control_deassert(compo->rst_aux);
+
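+ /* Look up the main and aux VTG instances through the "st,vtg" phandles */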
+ vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
+ if (vtg_np)
+ compo->vtg_main = of_vtg_find(vtg_np);
+
+ vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
+ if (vtg_np)
+ compo->vtg_aux = of_vtg_find(vtg_np);
+
+ /* Initialize compositor subdevices */
+ err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
+ compo->data.nb_subdev);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, compo);
+
+ return component_add(&pdev->dev, &sti_compositor_ops);
+}
+
+static int sti_compositor_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_compositor_ops);
+ return 0;
+}
+
+static struct platform_driver sti_compositor_driver = {
+ .driver = {
+ .name = "sti-compositor",
+ .owner = THIS_MODULE,
+ .of_match_table = compositor_of_match,
+ },
+ .probe = sti_compositor_probe,
+ .remove = sti_compositor_remove,
+};
+
+module_platform_driver(sti_compositor_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
new file mode 100644
index 000000000000..3ea19db72e0f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_COMPOSITOR_H_
+#define _STI_COMPOSITOR_H_
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+
+#include "sti_layer.h"
+#include "sti_mixer.h"
+
+#define WAIT_NEXT_VSYNC_MS 50 /*ms*/
+
+#define STI_MAX_LAYER 8
+#define STI_MAX_MIXER 2
+
+enum sti_compositor_subdev_type {
+ STI_MIXER_MAIN_SUBDEV,
+ STI_MIXER_AUX_SUBDEV,
+ STI_GPD_SUBDEV,
+ STI_VID_SUBDEV,
+ STI_CURSOR_SUBDEV,
+};
+
+struct sti_compositor_subdev_descriptor {
+ enum sti_compositor_subdev_type type;
+ int id;
+ unsigned int offset;
+};
+
+/**
+ * STI Compositor data structure
+ *
+ * @nb_subdev: number of subdevices supported by the compositor
+ * @subdev_desc: subdev list description
+ */
+#define MAX_SUBDEV 9
+struct sti_compositor_data {
+ unsigned int nb_subdev;
+ struct sti_compositor_subdev_descriptor subdev_desc[MAX_SUBDEV];
+};
+
+/**
+ * STI Compositor structure
+ *
+ * @dev: driver device
+ * @regs: registers (main)
+ * @data: device data
+ * @clk_compo_main: clock for main compo
+ * @clk_compo_aux: clock for aux compo
+ * @clk_pix_main: pixel clock for main path
+ * @clk_pix_aux: pixel clock for aux path
+ * @rst_main: reset control of the main path
+ * @rst_aux: reset control of the aux path
+ * @mixer: array of mixers
+ * @vtg_main: vtg for main data path
+ * @vtg_aux: vtg for auxiliary data path
+ * @layer: array of layers
+ * @nb_mixers: number of mixers for this compositor
+ * @nb_layers: number of layers (GDP,VID,...) for this compositor
+ * @enable: true if the compositor is enabled, false otherwise
+ * @vtg_vblank_nb: callback for VTG VSYNC notification
+ */
+struct sti_compositor {
+ struct device *dev;
+ void __iomem *regs;
+ struct sti_compositor_data data;
+ struct clk *clk_compo_main;
+ struct clk *clk_compo_aux;
+ struct clk *clk_pix_main;
+ struct clk *clk_pix_aux;
+ struct reset_control *rst_main;
+ struct reset_control *rst_aux;
+ struct sti_mixer *mixer[STI_MAX_MIXER];
+ struct sti_vtg *vtg_main;
+ struct sti_vtg *vtg_aux;
+ struct sti_layer *layer[STI_MAX_LAYER];
+ int nb_mixers;
+ int nb_layers;
+ bool enable;
+ struct notifier_block vtg_vblank_nb;
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
new file mode 100644
index 000000000000..d2ae0c0e13be
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_drm_drv.h"
+#include "sti_drm_crtc.h"
+#include "sti_vtg.h"
+
+static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+
+ compo->enable = true;
+
+ /* Prepare and enable the compo IP clock */
+ if (mixer->id == STI_MIXER_MAIN) {
+ if (clk_prepare_enable(compo->clk_compo_main))
+ DRM_INFO("Failed to prepare/enable compo_main clk\n");
+ } else {
+ if (clk_prepare_enable(compo->clk_compo_aux))
+ DRM_INFO("Failed to prepare/enable compo_aux clk\n");
+ }
+}
+
+static void sti_drm_crtc_commit(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct sti_layer *layer;
+
+ if (!mixer || !compo) {
+ DRM_ERROR("Can not find mixer or compositor\n");
+ return;
+ }
+
+ /* get GDP which is reserved to the CRTC FB */
+ layer = to_sti_layer(crtc->primary);
+ if (layer)
+ sti_layer_commit(layer);
+ else
+ DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
+
+ /* Enable layer on mixer */
+ if (sti_mixer_set_layer_status(mixer, layer, true))
+ DRM_ERROR("Can not enable layer at mixer\n");
+}
+
+static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* accept the provided drm_display_mode, do not fix it up */
+ return true;
+}
+
+static int
+sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct sti_layer *layer;
+ struct clk *clk;
+ int rate = mode->clock * 1000;
+ int res;
+ unsigned int w, h;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d mode:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ crtc->primary->fb->base.id, mode->base.id, mode->name);
+
+ DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+ mode->vrefresh, mode->clock,
+ mode->hdisplay,
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal,
+ mode->vdisplay,
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->type, mode->flags);
+
+ /* Set rate and prepare/enable pixel clock */
+ if (mixer->id == STI_MIXER_MAIN)
+ clk = compo->clk_pix_main;
+ else
+ clk = compo->clk_pix_aux;
+
+ res = clk_set_rate(clk, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
+ return -EINVAL;
+ }
+ if (clk_prepare_enable(clk)) {
+ DRM_ERROR("Failed to prepare/enable pix clk\n");
+ return -EINVAL;
+ }
+
+ sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux, &crtc->mode);
+
+ /* a GDP is reserved to the CRTC FB */
+ layer = to_sti_layer(crtc->primary);
+ if (!layer) {
+ DRM_ERROR("Can not find GDP0)\n");
+ return -EINVAL;
+ }
+
+ /* copy the mode data adjusted by mode_fixup() into crtc->mode
+ * so that hardware can be set to proper mode
+ */
+ memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
+
+ res = sti_mixer_set_layer_depth(mixer, layer);
+ if (res) {
+ DRM_ERROR("Can not set layer depth\n");
+ return -EINVAL;
+ }
+ res = sti_mixer_active_video_area(mixer, &crtc->mode);
+ if (res) {
+ DRM_ERROR("Can not set active video area\n");
+ return -EINVAL;
+ }
+
+ w = crtc->primary->fb->width - x;
+ h = crtc->primary->fb->height - y;
+
+ return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ mixer->id, 0, 0, w, h, x, y, w, h);
+}
+
+static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct sti_layer *layer;
+ unsigned int w, h;
+ int ret;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d (%d,%d)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ crtc->primary->fb->base.id, x, y);
+
+ /* GDP is reserved to the CRTC FB */
+ layer = to_sti_layer(crtc->primary);
+ if (!layer) {
+ DRM_ERROR("Can not find GDP0)\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ w = crtc->primary->fb->width - crtc->x;
+ h = crtc->primary->fb->height - crtc->y;
+
+ ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ mixer->id, 0, 0, w, h,
+ crtc->x, crtc->y, w, h);
+ if (ret) {
+ DRM_ERROR("Can not prepare layer\n");
+ goto out;
+ }
+
+ sti_drm_crtc_commit(crtc);
+out:
+ return ret;
+}
+
+static void sti_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+ /* do nothing */
+}
+
+static void sti_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct sti_layer *layer;
+
+ if (!compo->enable)
+ return;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
+
+ /* Disable Background */
+ sti_mixer_set_background_status(mixer, false);
+
+ /* Disable GDP */
+ layer = to_sti_layer(crtc->primary);
+ if (!layer) {
+ DRM_ERROR("Cannot find GDP0\n");
+ return;
+ }
+
+ /* Disable layer at mixer level */
+ if (sti_mixer_set_layer_status(mixer, layer, false))
+ DRM_ERROR("Can not disable %s layer at mixer\n",
+ sti_layer_to_str(layer));
+
+ /* Wait a while to be sure that a Vsync event is received */
+ msleep(WAIT_NEXT_VSYNC_MS);
+
+ /* Then disable layer itself */
+ sti_layer_disable(layer);
+
+ drm_vblank_off(crtc->dev, mixer->id);
+
+ /* Disable pixel clock and compo IP clocks */
+ if (mixer->id == STI_MIXER_MAIN) {
+ clk_disable_unprepare(compo->clk_pix_main);
+ clk_disable_unprepare(compo->clk_compo_main);
+ } else {
+ clk_disable_unprepare(compo->clk_pix_aux);
+ clk_disable_unprepare(compo->clk_compo_aux);
+ }
+
+ compo->enable = false;
+}
+
+static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
+ .dpms = sti_drm_crtc_dpms,
+ .prepare = sti_drm_crtc_prepare,
+ .commit = sti_drm_crtc_commit,
+ .mode_fixup = sti_drm_crtc_mode_fixup,
+ .mode_set = sti_drm_crtc_mode_set,
+ .mode_set_base = sti_drm_crtc_mode_set_base,
+ .load_lut = sti_drm_crtc_load_lut,
+ .disable = sti_drm_crtc_disable,
+};
+
+static int sti_drm_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *drm_dev = crtc->dev;
+ struct drm_framebuffer *old_fb;
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ unsigned long flags;
+ int ret;
+
+ DRM_DEBUG_KMS("fb %d --> fb %d\n",
+ crtc->primary->fb->base.id, fb->base.id);
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ old_fb = crtc->primary->fb;
+ crtc->primary->fb = fb;
+ ret = sti_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+ if (ret) {
+ DRM_ERROR("failed\n");
+ crtc->primary->fb = old_fb;
+ goto out;
+ }
+
+ if (event) {
+ event->pipe = mixer->id;
+
+ ret = drm_vblank_get(drm_dev, event->pipe);
+ if (ret) {
+ DRM_ERROR("Cannot get vblank\n");
+ goto out;
+ }
+
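+ /* Only one flip can be pending per mixer, reject others with -EBUSY */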
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (mixer->pending_event) {
+ drm_vblank_put(drm_dev, event->pipe);
+ ret = -EBUSY;
+ } else {
+ mixer->pending_event = event;
+ }
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ }
+out:
+ mutex_unlock(&drm_dev->struct_mutex);
+ return ret;
+}
+
+static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ DRM_DEBUG_KMS("\n");
+ drm_crtc_cleanup(crtc);
+}
+
+static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
+{
+ DRM_DEBUG_KMS("\n");
+ return 0;
+}
+
+int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct drm_device *drm_dev;
+ struct sti_compositor *compo =
+ container_of(nb, struct sti_compositor, vtg_vblank_nb);
+ int *crtc = data;
+ unsigned long flags;
+ struct sti_drm_private *priv;
+
+ drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
+ priv = drm_dev->dev_private;
+
+ if ((event != VTG_TOP_FIELD_EVENT) &&
+ (event != VTG_BOTTOM_FIELD_EVENT)) {
+ DRM_ERROR("unknown event: %lu\n", event);
+ return -EINVAL;
+ }
+
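+ /* Signal the vblank to DRM and complete any pending page flip event */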
+ drm_handle_vblank(drm_dev, *crtc);
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (compo->mixer[*crtc]->pending_event) {
+ drm_send_vblank_event(drm_dev, -1,
+ compo->mixer[*crtc]->pending_event);
+ drm_vblank_put(drm_dev, *crtc);
+ compo->mixer[*crtc]->pending_event = NULL;
+ }
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ return 0;
+}
+
+int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct sti_drm_private *dev_priv = dev->dev_private;
+ struct sti_compositor *compo = dev_priv->compo;
+ struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+
+ if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux,
+ vtg_vblank_nb, crtc)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
+
+void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct sti_drm_private *priv = dev->dev_private;
+ struct sti_compositor *compo = priv->compo;
+ struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+ unsigned long flags;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
+ DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+ /* free the resources of the pending requests */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (compo->mixer[crtc]->pending_event) {
+ drm_vblank_put(dev, crtc);
+ compo->mixer[crtc]->pending_event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+}
+EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
+
+static struct drm_crtc_funcs sti_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = sti_drm_crtc_page_flip,
+ .destroy = sti_drm_crtc_destroy,
+ .set_property = sti_drm_crtc_set_property,
+};
+
+bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+ if (mixer->id == STI_MIXER_MAIN)
+ return true;
+
+ return false;
+}
+
+int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor)
+{
+ struct drm_crtc *crtc = &mixer->drm_crtc;
+ int res;
+
+ res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
+ &sti_crtc_funcs);
+ if (res) {
+ DRM_ERROR("Can not initialze CRTC\n");
+ return -EINVAL;
+ }
+
+ drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs);
+
+ DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n",
+ crtc->base.id, sti_mixer_to_str(mixer));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
new file mode 100644
index 000000000000..caca8b14f017
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRM_CRTC_H_
+#define _STI_DRM_CRTC_H_
+
+#include <drm/drmP.h>
+
+struct sti_mixer;
+
+int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor);
+int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data);
+bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
new file mode 100644
index 000000000000..a7cc24917a96
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/component.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "sti_drm_drv.h"
+#include "sti_drm_crtc.h"
+
+#define DRIVER_NAME "sti"
+#define DRIVER_DESC "STMicroelectronics SoC DRM"
+#define DRIVER_DATE "20140601"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define STI_MAX_FB_HEIGHT 4096
+#define STI_MAX_FB_WIDTH 4096
+
+static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+};
+
+static void sti_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * Set the max width and height to default values.
+ * These values are used to check the framebuffer size limits
+ * in drm_mode_addfb().
+ */
+ dev->mode_config.max_width = STI_MAX_FB_WIDTH;
+ dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
+
+ dev->mode_config.funcs = &sti_drm_mode_config_funcs;
+}
+
+static int sti_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct sti_drm_private *private;
+ int ret;
+
+ private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
+ if (!private) {
+ DRM_ERROR("Failed to allocate private\n");
+ return -ENOMEM;
+ }
+ dev->dev_private = (void *)private;
+ private->drm_dev = dev;
+
+ drm_mode_config_init(dev);
+ drm_kms_helper_poll_init(dev);
+
+ sti_drm_mode_config_init(dev);
+
+ ret = component_bind_all(dev->dev, dev);
+ if (ret)
+ return ret;
+
+ drm_helper_disable_unused_functions(dev);
+
+#ifdef CONFIG_DRM_STI_FBDEV
+ drm_fbdev_cma_init(dev, 32,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+#endif
+ return 0;
+}
+
+static const struct file_operations sti_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .release = drm_release,
+};
+
+static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
+{
+ /* we want to be able to write in mmapped buffer */
+ flags |= O_RDWR;
+ return drm_gem_prime_export(dev, obj, flags);
+}
+
+static struct drm_driver sti_drm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+ DRIVER_GEM | DRIVER_PRIME,
+ .load = sti_drm_load,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .fops = &sti_drm_driver_fops,
+
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = sti_drm_crtc_enable_vblank,
+ .disable_vblank = sti_drm_crtc_disable_vblank,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = sti_drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int sti_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
+}
+
+static void sti_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops sti_drm_ops = {
+ .bind = sti_drm_bind,
+ .unbind = sti_drm_unbind,
+};
+
+static int sti_drm_master_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct device_node *child_np;
+ struct component_match *match = NULL;
+
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
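+ /* Add each available child of the display subsystem as a component match */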
+ child_np = of_get_next_available_child(node, NULL);
+
+ while (child_np) {
+ component_match_add(dev, &match, compare_of, child_np);
+ of_node_put(child_np);
+ child_np = of_get_next_available_child(node, child_np);
+ }
+
+ return component_master_add_with_match(dev, &sti_drm_ops, match);
+}
+
+static int sti_drm_master_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &sti_drm_ops);
+ return 0;
+}
+
+static struct platform_driver sti_drm_master_driver = {
+ .probe = sti_drm_master_probe,
+ .remove = sti_drm_master_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME "__master",
+ },
+};
+
+static int sti_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *master;
+
+ of_platform_populate(node, NULL, NULL, dev);
+
+ platform_driver_register(&sti_drm_master_driver);
+ master = platform_device_register_resndata(dev,
+ DRIVER_NAME "__master", -1,
+ NULL, 0, NULL, 0);
+ if (IS_ERR(master))
+ return PTR_ERR(master);
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+}
+
+static int sti_drm_platform_remove(struct platform_device *pdev)
+{
+ struct platform_device *master = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+ platform_device_unregister(master);
+ platform_driver_unregister(&sti_drm_master_driver);
+ return 0;
+}
+
+static const struct of_device_id sti_drm_dt_ids[] = {
+ { .compatible = "st,sti-display-subsystem", },
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
+
+static struct platform_driver sti_drm_platform_driver = {
+ .probe = sti_drm_platform_probe,
+ .remove = sti_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = sti_drm_dt_ids,
+ },
+};
+
+module_platform_driver(sti_drm_platform_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h
new file mode 100644
index 000000000000..ec5e2eb8dff9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_drv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRM_DRV_H_
+#define _STI_DRM_DRV_H_
+
+#include <drm/drmP.h>
+
+struct sti_compositor;
+struct sti_tvout;
+
+/**
+ * STI drm private structure
+ * This structure is stored as private in the drm_device
+ *
+ * @compo: compositor
+ * @plane_zorder_property: z-order property for CRTC planes
+ * @drm_dev: drm device
+ */
+struct sti_drm_private {
+ struct sti_compositor *compo;
+ struct drm_property *plane_zorder_property;
+ struct drm_device *drm_dev;
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
new file mode 100644
index 000000000000..f4118d4cac22
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_compositor.h"
+#include "sti_drm_drv.h"
+#include "sti_drm_plane.h"
+#include "sti_vtg.h"
+
+enum sti_layer_desc sti_layer_default_zorder[] = {
+ STI_GDP_0,
+ STI_VID_0,
+ STI_GDP_1,
+ STI_VID_1,
+ STI_GDP_2,
+ STI_GDP_3,
+};
+
+/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
+
+static int
+sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct sti_layer *layer = to_sti_layer(plane);
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ int res;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s) drm fb:%d\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ plane->base.id, sti_layer_to_str(layer), fb->base.id);
+ DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
+
+ res = sti_mixer_set_layer_depth(mixer, layer);
+ if (res) {
+ DRM_ERROR("Can not set layer depth\n");
+ return res;
+ }
+
+ /* src_x, src_y, src_w and src_h are in 16.16 fixed-point format */
+ res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x >> 16, src_y >> 16,
+ src_w >> 16, src_h >> 16);
+ if (res) {
+ DRM_ERROR("Layer prepare failed\n");
+ return res;
+ }
+
+ res = sti_layer_commit(layer);
+ if (res) {
+ DRM_ERROR("Layer commit failed\n");
+ return res;
+ }
+
+ res = sti_mixer_set_layer_status(mixer, layer, true);
+ if (res) {
+ DRM_ERROR("Can not enable layer at mixer\n");
+ return res;
+ }
+
+ return 0;
+}
+
+static int sti_drm_disable_plane(struct drm_plane *plane)
+{
+ struct sti_layer *layer;
+ struct sti_mixer *mixer;
+ int lay_res, mix_res;
+
+ if (!plane->crtc) {
+ DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
+ return 0;
+ }
+ layer = to_sti_layer(plane);
+ mixer = to_sti_mixer(plane->crtc);
+
+ DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+ plane->crtc->base.id, sti_mixer_to_str(mixer),
+ plane->base.id, sti_layer_to_str(layer));
+
+ /* Disable layer at mixer level */
+ mix_res = sti_mixer_set_layer_status(mixer, layer, false);
+ if (mix_res)
+ DRM_ERROR("Can not disable layer at mixer\n");
+
+ /* Wait a while to be sure that a Vsync event is received */
+ msleep(WAIT_NEXT_VSYNC_MS);
+
+ /* Then disable layer itself */
+ lay_res = sti_layer_disable(layer);
+ if (lay_res)
+ DRM_ERROR("Layer disable failed\n");
+
+ if (lay_res || mix_res)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sti_drm_plane_destroy(struct drm_plane *plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ sti_drm_disable_plane(plane);
+ drm_plane_cleanup(plane);
+}
+
+static int sti_drm_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct sti_drm_private *private = dev->dev_private;
+ struct sti_layer *layer = to_sti_layer(plane);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (property == private->plane_zorder_property) {
+ layer->zorder = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct drm_plane_funcs sti_drm_plane_funcs = {
+ .update_plane = sti_drm_update_plane,
+ .disable_plane = sti_drm_disable_plane,
+ .destroy = sti_drm_plane_destroy,
+ .set_property = sti_drm_plane_set_property,
+};
+
+static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
+ uint64_t default_val)
+{
+ struct drm_device *dev = plane->dev;
+ struct sti_drm_private *private = dev->dev_private;
+ struct drm_property *prop;
+ struct sti_layer *layer = to_sti_layer(plane);
+
+ prop = private->plane_zorder_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "zpos", 0,
+ GAM_MIXER_NB_DEPTH_LEVEL - 1);
+ if (!prop)
+ return;
+
+ private->plane_zorder_property = prop;
+ }
+
+ drm_object_attach_property(&plane->base, prop, default_val);
+ layer->zorder = default_val;
+}
+
+struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
+ struct sti_layer *layer,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type)
+{
+ int err, i;
+ uint64_t default_zorder = 0;
+
+ err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
+ &sti_drm_plane_funcs,
+ sti_layer_get_formats(layer),
+ sti_layer_get_nb_formats(layer), type);
+ if (err) {
+ DRM_ERROR("Failed to initialize plane\n");
+ return NULL;
+ }
+
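+ /* The default z-order is the layer position in sti_layer_default_zorder[] */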
+ for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
+ if (sti_layer_default_zorder[i] == layer->desc)
+ break;
+
+ default_zorder = i;
+
+ if (type == DRM_PLANE_TYPE_OVERLAY)
+ sti_drm_plane_attach_zorder_property(&layer->plane,
+ default_zorder);
+
+ DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
+ layer->plane.base.id,
+ sti_layer_to_str(layer), default_zorder);
+
+ return &layer->plane;
+}
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
new file mode 100644
index 000000000000..4f191839f2a7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_plane.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRM_PLANE_H_
+#define _STI_DRM_PLANE_H_
+
+#include <drm/drmP.h>
+
+struct sti_layer;
+
+struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
+ struct sti_layer *layer,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type);
+#endif
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
new file mode 100644
index 000000000000..4e30b74559f5
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -0,0 +1,549 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+
+#include "sti_compositor.h"
+#include "sti_gdp.h"
+#include "sti_layer.h"
+#include "sti_vtg.h"
+
+#define ENA_COLOR_FILL BIT(8)
+#define WAIT_NEXT_VSYNC BIT(31)
+
+/* GDP color formats */
+#define GDP_RGB565 0x00
+#define GDP_RGB888 0x01
+#define GDP_RGB888_32 0x02
+#define GDP_ARGB8565 0x04
+#define GDP_ARGB8888 0x05
+#define GDP_ARGB1555 0x06
+#define GDP_ARGB4444 0x07
+#define GDP_CLUT8 0x0B
+#define GDP_YCBR888 0x10
+#define GDP_YCBR422R 0x12
+#define GDP_AYCBR8888 0x15
+
+#define GAM_GDP_CTL_OFFSET 0x00
+#define GAM_GDP_AGC_OFFSET 0x04
+#define GAM_GDP_VPO_OFFSET 0x0C
+#define GAM_GDP_VPS_OFFSET 0x10
+#define GAM_GDP_PML_OFFSET 0x14
+#define GAM_GDP_PMP_OFFSET 0x18
+#define GAM_GDP_SIZE_OFFSET 0x1C
+#define GAM_GDP_NVN_OFFSET 0x24
+#define GAM_GDP_KEY1_OFFSET 0x28
+#define GAM_GDP_KEY2_OFFSET 0x2C
+#define GAM_GDP_PPT_OFFSET 0x34
+#define GAM_GDP_CML_OFFSET 0x3C
+#define GAM_GDP_MST_OFFSET 0x68
+
+#define GAM_GDP_ALPHARANGE_255 BIT(5)
+#define GAM_GDP_AGC_FULL_RANGE 0x00808080
+#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
+#define GAM_GDP_SIZE_MAX 0x7FF
+
+#define GDP_NODE_NB_BANK 2
+#define GDP_NODE_PER_FIELD 2
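+/*
+ * The two node-list banks act as a double buffer: while the hardware reads
+ * one list, the other one is free to be updated.
+ */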
+
+struct sti_gdp_node {
+ u32 gam_gdp_ctl;
+ u32 gam_gdp_agc;
+ u32 reserved1;
+ u32 gam_gdp_vpo;
+ u32 gam_gdp_vps;
+ u32 gam_gdp_pml;
+ u32 gam_gdp_pmp;
+ u32 gam_gdp_size;
+ u32 reserved2;
+ u32 gam_gdp_nvn;
+ u32 gam_gdp_key1;
+ u32 gam_gdp_key2;
+ u32 reserved3;
+ u32 gam_gdp_ppt;
+ u32 reserved4;
+ u32 gam_gdp_cml;
+};
+
+struct sti_gdp_node_list {
+ struct sti_gdp_node *top_field;
+ struct sti_gdp_node *btm_field;
+};
+
+/**
+ * STI GDP structure
+ *
+ * @layer: layer structure
+ * @clk_pix: pixel clock for the current gdp
+ * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
+ * @is_curr_top: true if the current node processed is the top field
+ * @node_list: array of node list
+ */
+struct sti_gdp {
+ struct sti_layer layer;
+ struct clk *clk_pix;
+ struct notifier_block vtg_field_nb;
+ bool is_curr_top;
+ struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
+};
+
+#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
+
+static const uint32_t gdp_supported_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_AYUV,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_C8,
+};
+
+static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
+{
+ return gdp_supported_formats;
+}
+
+static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
+{
+ return ARRAY_SIZE(gdp_supported_formats);
+}
+
+static int sti_gdp_fourcc2format(int fourcc)
+{
+ switch (fourcc) {
+ case DRM_FORMAT_XRGB8888:
+ return GDP_RGB888_32;
+ case DRM_FORMAT_ARGB8888:
+ return GDP_ARGB8888;
+ case DRM_FORMAT_ARGB4444:
+ return GDP_ARGB4444;
+ case DRM_FORMAT_ARGB1555:
+ return GDP_ARGB1555;
+ case DRM_FORMAT_RGB565:
+ return GDP_RGB565;
+ case DRM_FORMAT_RGB888:
+ return GDP_RGB888;
+ case DRM_FORMAT_AYUV:
+ return GDP_AYCBR8888;
+ case DRM_FORMAT_YUV444:
+ return GDP_YCBR888;
+ case DRM_FORMAT_VYUY:
+ return GDP_YCBR422R;
+ case DRM_FORMAT_C8:
+ return GDP_CLUT8;
+ }
+ return -1;
+}
+
+static int sti_gdp_get_alpharange(int format)
+{
+ switch (format) {
+ case GDP_ARGB8565:
+ case GDP_ARGB8888:
+ case GDP_AYCBR8888:
+ return GAM_GDP_ALPHARANGE_255;
+ }
+ return 0;
+}
+
+/**
+ * sti_gdp_get_free_nodes
+ * @layer: gdp layer
+ *
+ * Look for a GDP node list that is not currently read by the HW.
+ *
+ * RETURNS:
+ * Pointer to the free GDP node list
+ */
+static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
+{
+ int hw_nvn;
+ void *virt_nvn;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ unsigned int i;
+
+ hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ if (!hw_nvn)
+ goto end;
+
+ virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
+
+ for (i = 0; i < GDP_NODE_NB_BANK; i++)
+ if ((virt_nvn != gdp->node_list[i].btm_field) &&
+ (virt_nvn != gdp->node_list[i].top_field))
+ return &gdp->node_list[i];
+
+ /* in hazardous cases, restart with the first node */
+ DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
+ sti_layer_to_str(layer), hw_nvn);
+
+end:
+ return &gdp->node_list[0];
+}
+
+/**
+ * sti_gdp_get_current_nodes
+ * @layer: GDP layer
+ *
+ * Look for GDP nodes that are currently read by the HW.
+ *
+ * RETURNS:
+ * Pointer to the current GDP node list
+ */
+static
+struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
+{
+ int hw_nvn;
+ void *virt_nvn;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ unsigned int i;
+
+ hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ if (!hw_nvn)
+ goto end;
+
+ virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
+
+ for (i = 0; i < GDP_NODE_NB_BANK; i++)
+ if ((virt_nvn == gdp->node_list[i].btm_field) ||
+ (virt_nvn == gdp->node_list[i].top_field))
+ return &gdp->node_list[i];
+
+end:
+ DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
+ hw_nvn, sti_layer_to_str(layer));
+
+ return NULL;
+}
+
+/**
+ * sti_gdp_prepare_layer
+ * @layer: gdp layer
+ * @first_prepare: true if it is the first time this function is called
+ *
+ * Update the free GDP node list according to the layer properties.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
+{
+ struct sti_gdp_node_list *list;
+ struct sti_gdp_node *top_field, *btm_field;
+ struct drm_display_mode *mode = layer->mode;
+ struct device *dev = layer->dev;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ int format;
+ unsigned int depth, bpp;
+ int rate = mode->clock * 1000;
+ int res;
+ u32 ydo, xdo, yds, xds;
+
+ list = sti_gdp_get_free_nodes(layer);
+ top_field = list->top_field;
+ btm_field = list->btm_field;
+
+ dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
+ sti_layer_to_str(layer), top_field, btm_field);
+
+ /* Build the top field from layer params */
+ top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
+ top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
+ format = sti_gdp_fourcc2format(layer->format);
+ if (format == -1) {
+ DRM_ERROR("Format not supported by GDP %.4s\n",
+ (char *)&layer->format);
+ return 1;
+ }
+ top_field->gam_gdp_ctl |= format;
+ top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
+ top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
+
+ /* pixel memory location */
+ drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
+ top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
+ top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
+ top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
+
+ /* input parameters */
+ top_field->gam_gdp_pmp = layer->pitches[0];
+ top_field->gam_gdp_size =
+ clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
+ clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
+
+ /* output parameters */
+ ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
+ yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
+ top_field->gam_gdp_vpo = (ydo << 16) | xdo;
+ top_field->gam_gdp_vps = (yds << 16) | xds;
+
+ /* Same content and chained together */
+ memcpy(btm_field, top_field, sizeof(*btm_field));
+ top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
+ btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);
+
+ /* Interlaced mode */
+ if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
+ btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
+ layer->pitches[0];
+
+ if (first_prepare) {
+ /* Register gdp callback */
+ if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux,
+ &gdp->vtg_field_nb, layer->mixer_id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return 1;
+ }
+
+ /* Set and enable gdp clock */
+ if (gdp->clk_pix) {
+ res = clk_set_rate(gdp->clk_pix, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+ rate);
+ return 1;
+ }
+
+ if (clk_prepare_enable(gdp->clk_pix)) {
+ DRM_ERROR("Failed to prepare/enable gdp\n");
+ return 1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * sti_gdp_commit_layer
+ * @layer: gdp layer
+ *
+ * Update the NVN field of the 'right' field of the current GDP node (being
+ * used by the HW) with the address of the updated ('free') top field GDP node.
+ * - In interlaced mode the 'right' field is the bottom field as we update
+ * frames starting from their top field
+ * - In progressive mode, we update both bottom and top fields which are
+ * equal nodes.
+ * At the next VSYNC, the updated node list will be used by the HW.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_commit_layer(struct sti_layer *layer)
+{
+ struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
+ struct sti_gdp_node *updated_top_node = updated_list->top_field;
+ struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node);
+ u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node);
+ struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
+
+ dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
+ sti_layer_to_str(layer),
+ updated_top_node, updated_btm_node);
+ dev_dbg(layer->dev, "Current NVN:0x%X\n",
+ readl(layer->regs + GAM_GDP_NVN_OFFSET));
+ dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
+ (unsigned long)layer->paddr,
+ readl(layer->regs + GAM_GDP_PML_OFFSET));
+
+ if (curr_list == NULL) {
+ /* First update or invalid node should directly write in the
+ * hw register */
+ DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+ sti_layer_to_str(layer));
+
+ writel(gdp->is_curr_top == true ?
+ dma_updated_btm : dma_updated_top,
+ layer->regs + GAM_GDP_NVN_OFFSET);
+ return 0;
+ }
+
+ if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (gdp->is_curr_top == true) {
+ /* Do not update in the middle of the frame, but
+ * postpone the update after the bottom field has
+ * been displayed */
+ curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
+ } else {
+ /* Direct update to avoid one frame delay */
+ writel(dma_updated_top,
+ layer->regs + GAM_GDP_NVN_OFFSET);
+ }
+ } else {
+ /* Direct update for progressive to avoid one frame delay */
+ writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
+ }
+
+ return 0;
+}
+
+/**
+ * sti_gdp_disable_layer
+ * @layer: gdp layer
+ *
+ * Disable a GDP.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_disable_layer(struct sti_layer *layer)
+{
+ unsigned int i;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ struct sti_compositor *compo = dev_get_drvdata(layer->dev);
+
+ DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+
+ /* Set the nodes as 'to be ignored on mixer' */
+ for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+ gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
+ gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
+ }
+
+ if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
+ DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+ if (gdp->clk_pix)
+ clk_disable_unprepare(gdp->clk_pix);
+
+ return 0;
+}
+
+/**
+ * sti_gdp_field_cb
+ * @nb: notifier block
+ * @event: event message
+ * @data: private data
+ *
+ * Handle VTG top field and bottom field event.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+int sti_gdp_field_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
+
+ switch (event) {
+ case VTG_TOP_FIELD_EVENT:
+ gdp->is_curr_top = true;
+ break;
+ case VTG_BOTTOM_FIELD_EVENT:
+ gdp->is_curr_top = false;
+ break;
+ default:
+ DRM_ERROR("unsupported event: %lu\n", event);
+ break;
+ }
+
+ return 0;
+}
+
+static void sti_gdp_init(struct sti_layer *layer)
+{
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ struct device_node *np = layer->dev->of_node;
+ dma_addr_t dma;
+ void *base;
+ unsigned int i, size;
+
+ /* Allocate all the nodes within a single memory page */
+ size = sizeof(struct sti_gdp_node) *
+ GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
+
+ base = dma_alloc_writecombine(layer->dev,
+ size, &dma, GFP_KERNEL | GFP_DMA);
+ if (!base) {
+ DRM_ERROR("Failed to allocate memory for GDP node\n");
+ return;
+ }
+ memset(base, 0, size);
+
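+ /*
+ * Each node must be 16-byte aligned; carve the top and bottom field
+ * nodes of each bank out of the allocated area.
+ */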
+ for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+ if (virt_to_dma(layer->dev, base) & 0xF) {
+ DRM_ERROR("Mem alignment failed\n");
+ return;
+ }
+ gdp->node_list[i].top_field = base;
+ DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
+ base += sizeof(struct sti_gdp_node);
+
+ if (virt_to_dma(layer->dev, base) & 0xF) {
+ DRM_ERROR("Mem alignment failed\n");
+ return;
+ }
+ gdp->node_list[i].btm_field = base;
+ DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
+ base += sizeof(struct sti_gdp_node);
+ }
+
+ if (of_device_is_compatible(np, "st,stih407-compositor")) {
+ /* GDPs of the STiH407 chip have their own pixel clock */
+ char *clk_name;
+
+ switch (layer->desc) {
+ case STI_GDP_0:
+ clk_name = "pix_gdp1";
+ break;
+ case STI_GDP_1:
+ clk_name = "pix_gdp2";
+ break;
+ case STI_GDP_2:
+ clk_name = "pix_gdp3";
+ break;
+ case STI_GDP_3:
+ clk_name = "pix_gdp4";
+ break;
+ default:
+ DRM_ERROR("GDP id not recognized\n");
+ return;
+ }
+
+ gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
+ if (IS_ERR(gdp->clk_pix))
+ DRM_ERROR("Cannot get %s clock\n", clk_name);
+ }
+}
+
+static const struct sti_layer_funcs gdp_ops = {
+ .get_formats = sti_gdp_get_formats,
+ .get_nb_formats = sti_gdp_get_nb_formats,
+ .init = sti_gdp_init,
+ .prepare = sti_gdp_prepare_layer,
+ .commit = sti_gdp_commit_layer,
+ .disable = sti_gdp_disable_layer,
+};
+
+struct sti_layer *sti_gdp_create(struct device *dev, int id)
+{
+ struct sti_gdp *gdp;
+
+ gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
+ if (!gdp) {
+ DRM_ERROR("Failed to allocate memory for GDP\n");
+ return NULL;
+ }
+
+ gdp->layer.ops = &gdp_ops;
+ gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
+
+ return (struct sti_layer *)gdp;
+}
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
new file mode 100644
index 000000000000..1dab68274ad3
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_GDP_H_
+#define _STI_GDP_H_
+
+#include <linux/types.h>
+
+struct sti_layer *sti_gdp_create(struct device *dev, int id);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
new file mode 100644
index 000000000000..72d957f81c05
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+/* HDformatter registers */
+#define HDA_ANA_CFG 0x0000
+#define HDA_ANA_SCALE_CTRL_Y 0x0004
+#define HDA_ANA_SCALE_CTRL_CB 0x0008
+#define HDA_ANA_SCALE_CTRL_CR 0x000C
+#define HDA_ANA_ANC_CTRL 0x0010
+#define HDA_ANA_SRC_Y_CFG 0x0014
+#define HDA_COEFF_Y_PH1_TAP123 0x0018
+#define HDA_COEFF_Y_PH1_TAP456 0x001C
+#define HDA_COEFF_Y_PH2_TAP123 0x0020
+#define HDA_COEFF_Y_PH2_TAP456 0x0024
+#define HDA_COEFF_Y_PH3_TAP123 0x0028
+#define HDA_COEFF_Y_PH3_TAP456 0x002C
+#define HDA_COEFF_Y_PH4_TAP123 0x0030
+#define HDA_COEFF_Y_PH4_TAP456 0x0034
+#define HDA_ANA_SRC_C_CFG 0x0040
+#define HDA_COEFF_C_PH1_TAP123 0x0044
+#define HDA_COEFF_C_PH1_TAP456 0x0048
+#define HDA_COEFF_C_PH2_TAP123 0x004C
+#define HDA_COEFF_C_PH2_TAP456 0x0050
+#define HDA_COEFF_C_PH3_TAP123 0x0054
+#define HDA_COEFF_C_PH3_TAP456 0x0058
+#define HDA_COEFF_C_PH4_TAP123 0x005C
+#define HDA_COEFF_C_PH4_TAP456 0x0060
+#define HDA_SYNC_AWGI 0x0300
+
+/* HDA_ANA_CFG */
+#define CFG_AWG_ASYNC_EN BIT(0)
+#define CFG_AWG_ASYNC_HSYNC_MTD BIT(1)
+#define CFG_AWG_ASYNC_VSYNC_MTD BIT(2)
+#define CFG_AWG_SYNC_DEL BIT(3)
+#define CFG_AWG_FLTR_MODE_SHIFT 4
+#define CFG_AWG_FLTR_MODE_MASK (0xF << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_AWG_FLTR_MODE_SD (0 << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_AWG_FLTR_MODE_ED (1 << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_AWG_FLTR_MODE_HD (2 << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_SYNC_ON_PBPR_MASK BIT(8)
+#define CFG_PREFILTER_EN_MASK BIT(9)
+#define CFG_PBPR_SYNC_OFF_SHIFT 16
+#define CFG_PBPR_SYNC_OFF_MASK (0x7FF << CFG_PBPR_SYNC_OFF_SHIFT)
+#define CFG_PBPR_SYNC_OFF_VAL 0x117 /* Voltage dependent. stiH416 */
+
+/* Default scaling values */
+#define SCALE_CTRL_Y_DFLT 0x00C50256
+#define SCALE_CTRL_CB_DFLT 0x00DB0249
+#define SCALE_CTRL_CR_DFLT 0x00DB0249
+
+/* Video DACs control */
+#define VIDEO_DACS_CONTROL_MASK 0x0FFF
+#define VIDEO_DACS_CONTROL_SYSCFG2535 0x085C /* for stih416 */
+#define DAC_CFG_HD_OFF_SHIFT 5
+#define DAC_CFG_HD_OFF_MASK (0x7 << DAC_CFG_HD_OFF_SHIFT)
+#define VIDEO_DACS_CONTROL_SYSCFG5072 0x0120 /* for stih407 */
+#define DAC_CFG_HD_HZUVW_OFF_MASK BIT(1)
+
+
+/* Upsampler values for the alternative 2X Filter */
+#define SAMPLER_COEF_NB 8
+#define HDA_ANA_SRC_Y_CFG_ALT_2X 0x01130000
+static u32 coef_y_alt_2x[] = {
+ 0x00FE83FB, 0x1F900401, 0x00000000, 0x00000000,
+ 0x00F408F9, 0x055F7C25, 0x00000000, 0x00000000
+};
+
+#define HDA_ANA_SRC_C_CFG_ALT_2X 0x01750004
+static u32 coef_c_alt_2x[] = {
+ 0x001305F7, 0x05274BD0, 0x00000000, 0x00000000,
+ 0x0004907C, 0x09C80B9D, 0x00000000, 0x00000000
+};
+
+/* Upsampler values for the 4X Filter */
+#define HDA_ANA_SRC_Y_CFG_4X 0x01ED0005
+#define HDA_ANA_SRC_C_CFG_4X 0x01ED0004
+static u32 coef_yc_4x[] = {
+ 0x00FC827F, 0x008FE20B, 0x00F684FC, 0x050F7C24,
+ 0x00F4857C, 0x0A1F402E, 0x00FA027F, 0x0E076E1D
+};
+
+/* AWG instructions for some video modes */
+#define AWG_MAX_INST 64
+
+/* 720p@50 */
+static u32 AWGi_720p_50[] = {
+ 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
+ 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
+ 0x00000D8E, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C26, 0x0000003B, 0x00000FB4, 0x00000FB5,
+ 0x00000104, 0x00001AE8
+};
+
+#define NN_720p_50 ARRAY_SIZE(AWGi_720p_50)
+
+/* 720p@60 */
+static u32 AWGi_720p_60[] = {
+ 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
+ 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
+ 0x00000C44, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C26, 0x0000003B, 0x00000F0F, 0x00000F10,
+ 0x00000104, 0x00001AE8
+};
+
+#define NN_720p_60 ARRAY_SIZE(AWGi_720p_60)
+
+/* 1080p@30 */
+static u32 AWGi_1080p_30[] = {
+ 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
+ 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
+ 0x00000C2A, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C2A, 0x0000003B, 0x00000EBE, 0x00000EBF,
+ 0x00000EBF, 0x00000104, 0x00001A2F, 0x00001C4B,
+ 0x00001C52
+};
+
+#define NN_1080p_30 ARRAY_SIZE(AWGi_1080p_30)
+
+/* 1080p@25 */
+static u32 AWGi_1080p_25[] = {
+ 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
+ 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
+ 0x00000DE2, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C2A, 0x0000003B, 0x00000F51, 0x00000F51,
+ 0x00000F52, 0x00000104, 0x00001A2F, 0x00001C4B,
+ 0x00001C52
+};
+
+#define NN_1080p_25 ARRAY_SIZE(AWGi_1080p_25)
+
+/* 1080p@24 */
+static u32 AWGi_1080p_24[] = {
+ 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
+ 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
+ 0x00000E50, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C2A, 0x0000003B, 0x00000F76, 0x00000F76,
+ 0x00000F76, 0x00000104, 0x00001A2F, 0x00001C4B,
+ 0x00001C52
+};
+
+#define NN_1080p_24 ARRAY_SIZE(AWGi_1080p_24)
+
+/* 720x480p@60 */
+static u32 AWGi_720x480p_60[] = {
+ 0x00000904, 0x00000F18, 0x0000013B, 0x00001805,
+ 0x00000904, 0x00000C3D, 0x0000003B, 0x00001A06
+};
+
+#define NN_720x480p_60 ARRAY_SIZE(AWGi_720x480p_60)
+
+/* Video mode category */
+enum sti_hda_vid_cat {
+ VID_SD,
+ VID_ED,
+ VID_HD_74M,
+ VID_HD_148M
+};
+
+struct sti_hda_video_config {
+ struct drm_display_mode mode;
+ u32 *awg_instr;
+ int nb_instr;
+ enum sti_hda_vid_cat vid_cat;
+};
+
+/* HD analog supported modes
+ * Interlaced modes may be added when supported by the whole display chain
+ */
+static const struct sti_hda_video_config hda_supported_modes[] = {
+ /* 1080p30 74.250Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
+ /* 1080p30 74.176Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
+ /* 1080p24 74.250Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
+ /* 1080p24 74.176Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
+ /* 1080p25 74.250Mhz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_25, NN_1080p_25, VID_HD_74M},
+ /* 720p60 74.250Mhz */
+ {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_720p_60, NN_720p_60, VID_HD_74M},
+ /* 720p60 74.176Mhz */
+ {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74176, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_720p_60, NN_720p_60, VID_HD_74M},
+ /* 720p50 74.250Mhz */
+ {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_720p_50, NN_720p_50, VID_HD_74M},
+ /* 720x480p60 27.027Mhz */
+ {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27027, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ AWGi_720x480p_60, NN_720x480p_60, VID_ED},
+ /* 720x480p60 27.000Mhz */
+ {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ AWGi_720x480p_60, NN_720x480p_60, VID_ED}
+};
+
+/**
+ * STI hd analog structure
+ *
+ * @dev: driver device
+ * @drm_dev: pointer to drm device
+ * @mode: current display mode selected
+ * @regs: HD analog register
+ * @video_dacs_ctrl: video DACs control register
+ * @clk_pix: HD analog pixel clock
+ * @clk_hddac: HD analog DAC clock
+ * @enabled: true if HD analog is enabled, false otherwise
+ */
+struct sti_hda {
+ struct device dev;
+ struct drm_device *drm_dev;
+ struct drm_display_mode mode;
+ void __iomem *regs;
+ void __iomem *video_dacs_ctrl;
+ struct clk *clk_pix;
+ struct clk *clk_hddac;
+ bool enabled;
+};
+
+struct sti_hda_connector {
+ struct drm_connector drm_connector;
+ struct drm_encoder *encoder;
+ struct sti_hda *hda;
+};
+
+#define to_sti_hda_connector(x) \
+ container_of(x, struct sti_hda_connector, drm_connector)
+
+static u32 hda_read(struct sti_hda *hda, int offset)
+{
+ return readl(hda->regs + offset);
+}
+
+static void hda_write(struct sti_hda *hda, u32 val, int offset)
+{
+ writel(val, hda->regs + offset);
+}
+
+/**
+ * Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ * @idx: index of the found mode
+ *
+ * Return true if mode is found
+ */
+static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++)
+ if (drm_mode_equal(&hda_supported_modes[i].mode, &mode)) {
+ *idx = i;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * Enable or disable the HD DACs
+ *
+ * @hda: pointer to HD analog structure
+ * @enable: true to enable the HD DACs, false to disable them
+ */
+static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
+{
+ u32 mask;
+
+ if (hda->video_dacs_ctrl) {
+ u32 val;
+
+ switch ((u32)hda->video_dacs_ctrl & VIDEO_DACS_CONTROL_MASK) {
+ case VIDEO_DACS_CONTROL_SYSCFG2535:
+ mask = DAC_CFG_HD_OFF_MASK;
+ break;
+ case VIDEO_DACS_CONTROL_SYSCFG5072:
+ mask = DAC_CFG_HD_HZUVW_OFF_MASK;
+ break;
+ default:
+ DRM_INFO("Video DACS control register not supported!");
+ return;
+ }
+
+ val = readl(hda->video_dacs_ctrl);
+ if (enable)
+ val &= ~mask;
+ else
+ val |= mask;
+
+ writel(val, hda->video_dacs_ctrl);
+ }
+}
+
+/**
+ * Configure AWG, writing instructions
+ *
+ * @hda: pointer to HD analog structure
+ * @awg_instr: pointer to AWG instructions table
+ * @nb: nb of AWG instructions
+ */
+static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb)
+{
+ unsigned int i;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ for (i = 0; i < nb; i++)
+ hda_write(hda, awg_instr[i], HDA_SYNC_AWGI + i * 4);
+ for (i = nb; i < AWG_MAX_INST; i++)
+ hda_write(hda, 0, HDA_SYNC_AWGI + i * 4);
+}
+
+static void sti_hda_disable(struct drm_bridge *bridge)
+{
+ struct sti_hda *hda = bridge->driver_private;
+ u32 val;
+
+ if (!hda->enabled)
+ return;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Disable HD DAC and AWG */
+ val = hda_read(hda, HDA_ANA_CFG);
+ val &= ~CFG_AWG_ASYNC_EN;
+ hda_write(hda, val, HDA_ANA_CFG);
+ hda_write(hda, 0, HDA_ANA_ANC_CTRL);
+
+ hda_enable_hd_dacs(hda, false);
+
+ /* Disable/unprepare hda clock */
+ clk_disable_unprepare(hda->clk_hddac);
+ clk_disable_unprepare(hda->clk_pix);
+
+ hda->enabled = false;
+}
+
+static void sti_hda_pre_enable(struct drm_bridge *bridge)
+{
+ struct sti_hda *hda = bridge->driver_private;
+	u32 val, i;
+	int mode_idx;
+ u32 src_filter_y, src_filter_c;
+ u32 *coef_y, *coef_c;
+ u32 filter_mode;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (hda->enabled)
+ return;
+
+ /* Prepare/enable clocks */
+ if (clk_prepare_enable(hda->clk_pix))
+ DRM_ERROR("Failed to prepare/enable hda_pix clk\n");
+ if (clk_prepare_enable(hda->clk_hddac))
+ DRM_ERROR("Failed to prepare/enable hda_hddac clk\n");
+
+ if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ DRM_ERROR("Undefined mode\n");
+ return;
+ }
+
+ switch (hda_supported_modes[mode_idx].vid_cat) {
+ case VID_HD_148M:
+ DRM_ERROR("Beyond HD analog capabilities\n");
+ return;
+ case VID_HD_74M:
+		/* HD uses the alternate 2x filter */
+ filter_mode = CFG_AWG_FLTR_MODE_HD;
+ src_filter_y = HDA_ANA_SRC_Y_CFG_ALT_2X;
+ src_filter_c = HDA_ANA_SRC_C_CFG_ALT_2X;
+ coef_y = coef_y_alt_2x;
+ coef_c = coef_c_alt_2x;
+ break;
+ case VID_ED:
+ /* ED uses 4x filter */
+ filter_mode = CFG_AWG_FLTR_MODE_ED;
+ src_filter_y = HDA_ANA_SRC_Y_CFG_4X;
+ src_filter_c = HDA_ANA_SRC_C_CFG_4X;
+ coef_y = coef_yc_4x;
+ coef_c = coef_yc_4x;
+ break;
+ case VID_SD:
+ DRM_ERROR("Not supported\n");
+ return;
+ default:
+ DRM_ERROR("Undefined resolution\n");
+ return;
+ }
+ DRM_DEBUG_DRIVER("Using HDA mode #%d\n", mode_idx);
+
+ /* Enable HD Video DACs */
+ hda_enable_hd_dacs(hda, true);
+
+ /* Configure scaler */
+ hda_write(hda, SCALE_CTRL_Y_DFLT, HDA_ANA_SCALE_CTRL_Y);
+ hda_write(hda, SCALE_CTRL_CB_DFLT, HDA_ANA_SCALE_CTRL_CB);
+ hda_write(hda, SCALE_CTRL_CR_DFLT, HDA_ANA_SCALE_CTRL_CR);
+
+ /* Configure sampler */
+	hda_write(hda, src_filter_y, HDA_ANA_SRC_Y_CFG);
+ hda_write(hda, src_filter_c, HDA_ANA_SRC_C_CFG);
+ for (i = 0; i < SAMPLER_COEF_NB; i++) {
+ hda_write(hda, coef_y[i], HDA_COEFF_Y_PH1_TAP123 + i * 4);
+ hda_write(hda, coef_c[i], HDA_COEFF_C_PH1_TAP123 + i * 4);
+ }
+
+ /* Configure main HDFormatter */
+ val = 0;
+ val |= (hda->mode.flags & DRM_MODE_FLAG_INTERLACE) ?
+ 0 : CFG_AWG_ASYNC_VSYNC_MTD;
+ val |= (CFG_PBPR_SYNC_OFF_VAL << CFG_PBPR_SYNC_OFF_SHIFT);
+ val |= filter_mode;
+ hda_write(hda, val, HDA_ANA_CFG);
+
+ /* Configure AWG */
+ sti_hda_configure_awg(hda, hda_supported_modes[mode_idx].awg_instr,
+ hda_supported_modes[mode_idx].nb_instr);
+
+ /* Enable AWG */
+ val = hda_read(hda, HDA_ANA_CFG);
+ val |= CFG_AWG_ASYNC_EN;
+ hda_write(hda, val, HDA_ANA_CFG);
+
+ hda->enabled = true;
+}
+
+static void sti_hda_set_mode(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sti_hda *hda = bridge->driver_private;
+	int mode_idx;
+ int hddac_rate;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ memcpy(&hda->mode, mode, sizeof(struct drm_display_mode));
+
+ if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ DRM_ERROR("Undefined mode\n");
+ return;
+ }
+
+ switch (hda_supported_modes[mode_idx].vid_cat) {
+ case VID_HD_74M:
+		/* HD uses the alternate 2x filter */
+ hddac_rate = mode->clock * 1000 * 2;
+ break;
+ case VID_ED:
+ /* ED uses 4x filter */
+ hddac_rate = mode->clock * 1000 * 4;
+ break;
+ default:
+ DRM_ERROR("Undefined mode\n");
+ return;
+ }
+
+	/* HD DAC rate: 148.5 MHz (74.25 MHz x 2) or 108 MHz (27 MHz x 4) */
+ ret = clk_set_rate(hda->clk_hddac, hddac_rate);
+ if (ret < 0)
+ DRM_ERROR("Cannot set rate (%dHz) for hda_hddac clk\n",
+ hddac_rate);
+
+ /* HDformatter clock = compositor clock */
+ ret = clk_set_rate(hda->clk_pix, mode->clock * 1000);
+ if (ret < 0)
+ DRM_ERROR("Cannot set rate (%dHz) for hda_pix clk\n",
+ mode->clock * 1000);
+}
+
+static void sti_hda_bridge_nope(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static void sti_hda_bridge_destroy(struct drm_bridge *bridge)
+{
+ drm_bridge_cleanup(bridge);
+ kfree(bridge);
+}
+
+static const struct drm_bridge_funcs sti_hda_bridge_funcs = {
+ .pre_enable = sti_hda_pre_enable,
+ .enable = sti_hda_bridge_nope,
+ .disable = sti_hda_disable,
+ .post_disable = sti_hda_bridge_nope,
+ .mode_set = sti_hda_set_mode,
+	.destroy = sti_hda_bridge_destroy,
+};
+
+static int sti_hda_connector_get_modes(struct drm_connector *connector)
+{
+ unsigned int i;
+ int count = 0;
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+ struct sti_hda *hda = hda_connector->hda;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(hda->drm_dev,
+ &hda_supported_modes[i].mode);
+ if (!mode)
+ continue;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ /* the first mode is the preferred mode */
+ if (i == 0)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ count++;
+ }
+
+ drm_mode_sort(&connector->modes);
+
+ return count;
+}
+
+#define CLK_TOLERANCE_HZ 50
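+/*
+ * Worked example: for the 720p60 entry of hda_supported_modes (pixel clock
+ * 74.25 MHz), target = 74250000 Hz, so clk_round_rate() on clk_pix must
+ * return a rate within [74249950, 74250050] Hz for the mode to be reported
+ * as MODE_OK.
+ */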
+
+static int sti_hda_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+ int target_max = target + CLK_TOLERANCE_HZ;
+ int result;
+ int idx;
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+ struct sti_hda *hda = hda_connector->hda;
+
+ if (!hda_get_mode_idx(*mode, &idx)) {
+ return MODE_BAD;
+ } else {
+ result = clk_round_rate(hda->clk_pix, target);
+
+ DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
+ target, result);
+
+ if ((result < target_min) || (result > target_max)) {
+ DRM_DEBUG_DRIVER("hda pixclk=%d not supported\n",
+ target);
+ return MODE_BAD;
+ }
+ }
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+sti_hda_best_encoder(struct drm_connector *connector)
+{
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+
+ /* Best encoder is the one associated during connector creation */
+ return hda_connector->encoder;
+}
+
+static struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
+ .get_modes = sti_hda_connector_get_modes,
+ .mode_valid = sti_hda_connector_mode_valid,
+ .best_encoder = sti_hda_best_encoder,
+};
+
+static enum drm_connector_status
+sti_hda_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void sti_hda_connector_destroy(struct drm_connector *connector)
+{
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(hda_connector);
+}
+
+static struct drm_connector_funcs sti_hda_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = sti_hda_connector_detect,
+ .destroy = sti_hda_connector_destroy,
+};
+
+static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int sti_hda_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_hda *hda = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder;
+ struct sti_hda_connector *connector;
+ struct drm_connector *drm_connector;
+ struct drm_bridge *bridge;
+ int err;
+
+ /* Set the drm device handle */
+ hda->drm_dev = drm_dev;
+
+ encoder = sti_hda_find_encoder(drm_dev);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ connector->hda = hda;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver_private = hda;
+ drm_bridge_init(drm_dev, bridge, &sti_hda_bridge_funcs);
+
+ encoder->bridge = bridge;
+ connector->encoder = encoder;
+
+ drm_connector = (struct drm_connector *)connector;
+
+ drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm_dev, drm_connector,
+ &sti_hda_connector_funcs, DRM_MODE_CONNECTOR_Component);
+ drm_connector_helper_add(drm_connector,
+ &sti_hda_connector_helper_funcs);
+
+ err = drm_connector_register(drm_connector);
+ if (err)
+ goto err_connector;
+
+ err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ if (err) {
+		DRM_ERROR("Failed to attach a connector to an encoder\n");
+ goto err_sysfs;
+ }
+
+ return 0;
+
+err_sysfs:
+ drm_connector_unregister(drm_connector);
+err_connector:
+ drm_bridge_cleanup(bridge);
+ drm_connector_cleanup(drm_connector);
+ return -EINVAL;
+}
+
+static void sti_hda_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_hda_ops = {
+ .bind = sti_hda_bind,
+ .unbind = sti_hda_unbind,
+};
+
+static int sti_hda_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sti_hda *hda;
+ struct resource *res;
+
+ DRM_INFO("%s\n", __func__);
+
+ hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL);
+ if (!hda)
+ return -ENOMEM;
+
+ hda->dev = pdev->dev;
+
+ /* Get resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
+ if (!res) {
+ DRM_ERROR("Invalid hda resource\n");
+ return -ENOMEM;
+ }
+ hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!hda->regs)
+		return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "video-dacs-ctrl");
+ if (res) {
+ hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+		if (!hda->video_dacs_ctrl)
+			return -ENOMEM;
+ } else {
+		/* If there is no video-dacs-ctrl resource, continue probing */
+ DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n");
+ hda->video_dacs_ctrl = NULL;
+ }
+
+ /* Get clock resources */
+ hda->clk_pix = devm_clk_get(dev, "pix");
+ if (IS_ERR(hda->clk_pix)) {
+ DRM_ERROR("Cannot get hda_pix clock\n");
+ return PTR_ERR(hda->clk_pix);
+ }
+
+ hda->clk_hddac = devm_clk_get(dev, "hddac");
+ if (IS_ERR(hda->clk_hddac)) {
+ DRM_ERROR("Cannot get hda_hddac clock\n");
+ return PTR_ERR(hda->clk_hddac);
+ }
+
+ platform_set_drvdata(pdev, hda);
+
+ return component_add(&pdev->dev, &sti_hda_ops);
+}
+
+static int sti_hda_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_hda_ops);
+ return 0;
+}
+
+static const struct of_device_id hda_of_match[] = {
+ { .compatible = "st,stih416-hda", },
+ { .compatible = "st,stih407-hda", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, hda_of_match);
+
+struct platform_driver sti_hda_driver = {
+ .driver = {
+ .name = "sti-hda",
+ .owner = THIS_MODULE,
+ .of_match_table = hda_of_match,
+ },
+ .probe = sti_hda_probe,
+ .remove = sti_hda_remove,
+};
+
+module_platform_driver(sti_hda_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
new file mode 100644
index 000000000000..284e541d970d
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -0,0 +1,810 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/hdmi.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+#include "sti_hdmi.h"
+#include "sti_hdmi_tx3g4c28phy.h"
+#include "sti_hdmi_tx3g0c55phy.h"
+#include "sti_vtg.h"
+
+#define HDMI_CFG 0x0000
+#define HDMI_INT_EN 0x0004
+#define HDMI_INT_STA 0x0008
+#define HDMI_INT_CLR 0x000C
+#define HDMI_STA 0x0010
+#define HDMI_ACTIVE_VID_XMIN 0x0100
+#define HDMI_ACTIVE_VID_XMAX 0x0104
+#define HDMI_ACTIVE_VID_YMIN 0x0108
+#define HDMI_ACTIVE_VID_YMAX 0x010C
+#define HDMI_DFLT_CHL0_DAT 0x0110
+#define HDMI_DFLT_CHL1_DAT 0x0114
+#define HDMI_DFLT_CHL2_DAT 0x0118
+#define HDMI_SW_DI_1_HEAD_WORD 0x0210
+#define HDMI_SW_DI_1_PKT_WORD0 0x0214
+#define HDMI_SW_DI_1_PKT_WORD1 0x0218
+#define HDMI_SW_DI_1_PKT_WORD2 0x021C
+#define HDMI_SW_DI_1_PKT_WORD3 0x0220
+#define HDMI_SW_DI_1_PKT_WORD4 0x0224
+#define HDMI_SW_DI_1_PKT_WORD5 0x0228
+#define HDMI_SW_DI_1_PKT_WORD6 0x022C
+#define HDMI_SW_DI_CFG 0x0230
+
+#define HDMI_IFRAME_SLOT_AVI 1
+
+#define XCAT(prefix, x, suffix) prefix ## x ## suffix
+#define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD)
+#define HDMI_SW_DI_N_PKT_WORD0(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD0)
+#define HDMI_SW_DI_N_PKT_WORD1(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD1)
+#define HDMI_SW_DI_N_PKT_WORD2(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD2)
+#define HDMI_SW_DI_N_PKT_WORD3(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD3)
+#define HDMI_SW_DI_N_PKT_WORD4(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD4)
+#define HDMI_SW_DI_N_PKT_WORD5(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD5)
+#define HDMI_SW_DI_N_PKT_WORD6(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD6)
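+/*
+ * Example: HDMI_SW_DI_N_PKT_WORD0(1) token-pastes to HDMI_SW_DI_1_PKT_WORD0,
+ * i.e. register offset 0x0214, so slot 1 (HDMI_IFRAME_SLOT_AVI) reuses the
+ * HDMI_SW_DI_1_* register definitions above.
+ */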
+
+#define HDMI_IFRAME_DISABLED 0x0
+#define HDMI_IFRAME_SINGLE_SHOT 0x1
+#define HDMI_IFRAME_FIELD 0x2
+#define HDMI_IFRAME_FRAME 0x3
+#define HDMI_IFRAME_MASK 0x3
+#define HDMI_IFRAME_CFG_DI_N(x, n)	((x) << (((n) - 1) * 4)) /* n from 1 to 6 */
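+/*
+ * Example: HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI)
+ * evaluates to 0x2 << 0, i.e. slot 1 occupies bits [3:0] of HDMI_SW_DI_CFG
+ * and is set up for once-per-field transmission.
+ */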
+
+#define HDMI_CFG_DEVICE_EN BIT(0)
+#define HDMI_CFG_HDMI_NOT_DVI BIT(1)
+#define HDMI_CFG_HDCP_EN BIT(2)
+#define HDMI_CFG_ESS_NOT_OESS BIT(3)
+#define HDMI_CFG_H_SYNC_POL_NEG BIT(4)
+#define HDMI_CFG_SINK_TERM_DET_EN BIT(5)
+#define HDMI_CFG_V_SYNC_POL_NEG BIT(6)
+#define HDMI_CFG_422_EN BIT(8)
+#define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12)
+#define HDMI_CFG_FIFO_UNDERRUN_CLR BIT(13)
+#define HDMI_CFG_SW_RST_EN BIT(31)
+
+#define HDMI_INT_GLOBAL BIT(0)
+#define HDMI_INT_SW_RST BIT(1)
+#define HDMI_INT_PIX_CAP BIT(3)
+#define HDMI_INT_HOT_PLUG BIT(4)
+#define HDMI_INT_DLL_LCK BIT(5)
+#define HDMI_INT_NEW_FRAME BIT(6)
+#define HDMI_INT_GENCTRL_PKT BIT(7)
+#define HDMI_INT_SINK_TERM_PRESENT BIT(11)
+
+#define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
+ | HDMI_INT_DLL_LCK \
+ | HDMI_INT_HOT_PLUG \
+ | HDMI_INT_GLOBAL)
+
+#define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+ | HDMI_INT_GENCTRL_PKT \
+ | HDMI_INT_NEW_FRAME \
+ | HDMI_INT_DLL_LCK \
+ | HDMI_INT_HOT_PLUG \
+ | HDMI_INT_PIX_CAP \
+ | HDMI_INT_SW_RST \
+ | HDMI_INT_GLOBAL)
+
+#define HDMI_STA_SW_RST BIT(1)
+
+struct sti_hdmi_connector {
+ struct drm_connector drm_connector;
+ struct drm_encoder *encoder;
+ struct sti_hdmi *hdmi;
+};
+
+#define to_sti_hdmi_connector(x) \
+ container_of(x, struct sti_hdmi_connector, drm_connector)
+
+u32 hdmi_read(struct sti_hdmi *hdmi, int offset)
+{
+ return readl(hdmi->regs + offset);
+}
+
+void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset)
+{
+ writel(val, hdmi->regs + offset);
+}
+
+/**
+ * Threaded HDMI interrupt handler
+ *
+ * @irq: irq number
+ * @arg: pointer to the sti_hdmi structure
+ */
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
+{
+ struct sti_hdmi *hdmi = arg;
+
+ /* Hot plug/unplug IRQ */
+ if (hdmi->irq_status & HDMI_INT_HOT_PLUG) {
+ /* read gpio to get the status */
+ hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+ if (hdmi->drm_dev)
+ drm_helper_hpd_irq_event(hdmi->drm_dev);
+ }
+
+ /* Sw reset and PLL lock are exclusive so we can use the same
+ * event to signal them
+ */
+ if (hdmi->irq_status & (HDMI_INT_SW_RST | HDMI_INT_DLL_LCK)) {
+ hdmi->event_received = true;
+ wake_up_interruptible(&hdmi->wait_event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * HDMI interrupt handler
+ *
+ * @irq: irq number
+ * @arg: pointer to the sti_hdmi structure
+ */
+static irqreturn_t hdmi_irq(int irq, void *arg)
+{
+ struct sti_hdmi *hdmi = arg;
+
+ /* read interrupt status */
+ hdmi->irq_status = hdmi_read(hdmi, HDMI_INT_STA);
+
+ /* clear interrupt status */
+ hdmi_write(hdmi, hdmi->irq_status, HDMI_INT_CLR);
+
+ /* force sync bus write */
+ hdmi_read(hdmi, HDMI_INT_STA);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * Set hdmi active area depending on the drm display mode selected
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void hdmi_active_area(struct sti_hdmi *hdmi)
+{
+ u32 xmin, xmax;
+ u32 ymin, ymax;
+
+ xmin = sti_vtg_get_pixel_number(hdmi->mode, 0);
+ xmax = sti_vtg_get_pixel_number(hdmi->mode, hdmi->mode.hdisplay - 1);
+ ymin = sti_vtg_get_line_number(hdmi->mode, 0);
+ ymax = sti_vtg_get_line_number(hdmi->mode, hdmi->mode.vdisplay - 1);
+
+ hdmi_write(hdmi, xmin, HDMI_ACTIVE_VID_XMIN);
+ hdmi_write(hdmi, xmax, HDMI_ACTIVE_VID_XMAX);
+ hdmi_write(hdmi, ymin, HDMI_ACTIVE_VID_YMIN);
+ hdmi_write(hdmi, ymax, HDMI_ACTIVE_VID_YMAX);
+}
+
+/**
+ * Overall hdmi configuration
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void hdmi_config(struct sti_hdmi *hdmi)
+{
+ u32 conf;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Clear overrun and underrun fifo */
+ conf = HDMI_CFG_FIFO_OVERRUN_CLR | HDMI_CFG_FIFO_UNDERRUN_CLR;
+
+ /* Enable HDMI mode not DVI */
+ conf |= HDMI_CFG_HDMI_NOT_DVI | HDMI_CFG_ESS_NOT_OESS;
+
+ /* Enable sink term detection */
+ conf |= HDMI_CFG_SINK_TERM_DET_EN;
+
+ /* Set Hsync polarity */
+ if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) {
+ DRM_DEBUG_DRIVER("H Sync Negative\n");
+ conf |= HDMI_CFG_H_SYNC_POL_NEG;
+ }
+
+ /* Set Vsync polarity */
+ if (hdmi->mode.flags & DRM_MODE_FLAG_NVSYNC) {
+ DRM_DEBUG_DRIVER("V Sync Negative\n");
+ conf |= HDMI_CFG_V_SYNC_POL_NEG;
+ }
+
+ /* Enable HDMI */
+ conf |= HDMI_CFG_DEVICE_EN;
+
+ hdmi_write(hdmi, conf, HDMI_CFG);
+}
+
+/**
+ * Prepare and configure the AVI infoframe
+ *
+ * AVI infoframes are transmitted at least once per two video fields and
+ * contain information about the HDMI transmission mode, such as color space
+ * and colorimetry.
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return negative value if error occurs
+ */
+static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
+{
+ struct drm_display_mode *mode = &hdmi->mode;
+ struct hdmi_avi_infoframe infoframe;
+ u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ u8 *frame = buffer + HDMI_INFOFRAME_HEADER_SIZE;
+ u32 val;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode);
+ if (ret < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %d\n", ret);
+ return ret;
+ }
+
+ /* fixed infoframe configuration not linked to the mode */
+ infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
+
+ ret = hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
+ return ret;
+ }
+
+ /* Disable transmission slot for AVI infoframe */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, HDMI_IFRAME_SLOT_AVI);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+ /* Infoframe header */
+ val = buffer[0x0];
+ val |= buffer[0x1] << 8;
+ val |= buffer[0x2] << 16;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI));
+
+ /* Infoframe packet bytes */
+ val = frame[0x0];
+ val |= frame[0x1] << 8;
+ val |= frame[0x2] << 16;
+ val |= frame[0x3] << 24;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI));
+
+ val = frame[0x4];
+ val |= frame[0x5] << 8;
+ val |= frame[0x6] << 16;
+ val |= frame[0x7] << 24;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI));
+
+ val = frame[0x8];
+ val |= frame[0x9] << 8;
+ val |= frame[0xA] << 16;
+ val |= frame[0xB] << 24;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
+
+ val = frame[0xC];
+ val |= frame[0xD] << 8;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
+
+ /* Enable transmission slot for AVI infoframe
+ * According to the hdmi specification, AVI infoframe should be
+ * transmitted at least once per two video fields
+ */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+ return 0;
+}
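+
+/*
+ * Illustrative sketch only (not part of the original patch): each block of
+ * writes above packs infoframe bytes little-endian into one 32-bit register
+ * word; a hypothetical helper doing the same packing would be:
+ */
+#if 0
+static u32 hdmi_infoframe_pack_word(const u8 *data, unsigned int len)
+{
+	u32 val = 0;
+	unsigned int i;
+
+	for (i = 0; i < len && i < 4; i++)
+		val |= (u32)data[i] << (8 * i);
+	return val;
+}
+#endif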
+
+/**
+ * Software reset of the hdmi subsystem
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+#define HDMI_TIMEOUT_SWRESET 100 /* milliseconds */
+static void hdmi_swreset(struct sti_hdmi *hdmi)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Enable hdmi_audio clock only during hdmi reset */
+ if (clk_prepare_enable(hdmi->clk_audio))
+ DRM_INFO("Failed to prepare/enable hdmi_audio clk\n");
+
+ /* Sw reset */
+ hdmi->event_received = false;
+
+ val = hdmi_read(hdmi, HDMI_CFG);
+ val |= HDMI_CFG_SW_RST_EN;
+ hdmi_write(hdmi, val, HDMI_CFG);
+
+ /* Wait reset completed */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received == true,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_SWRESET));
+
+ /*
+ * HDMI_STA_SW_RST bit is set to '1' when SW_RST bit in HDMI_CFG is
+ * set to '1' and clk_audio is running.
+ */
+ if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_SW_RST) == 0)
+		DRM_DEBUG_DRIVER("Warning: HDMI sw reset timeout occurred\n");
+
+ val = hdmi_read(hdmi, HDMI_CFG);
+ val &= ~HDMI_CFG_SW_RST_EN;
+ hdmi_write(hdmi, val, HDMI_CFG);
+
+	/* Disable hdmi_audio clock; it is only needed during the sw reset */
+ clk_disable_unprepare(hdmi->clk_audio);
+}
+
+static void sti_hdmi_disable(struct drm_bridge *bridge)
+{
+ struct sti_hdmi *hdmi = bridge->driver_private;
+
+ u32 val = hdmi_read(hdmi, HDMI_CFG);
+
+ if (!hdmi->enabled)
+ return;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Disable HDMI */
+ val &= ~HDMI_CFG_DEVICE_EN;
+ hdmi_write(hdmi, val, HDMI_CFG);
+
+ hdmi_write(hdmi, 0xffffffff, HDMI_INT_CLR);
+
+ /* Stop the phy */
+ hdmi->phy_ops->stop(hdmi);
+
+ /* Set the default channel data to be a dark red */
+ hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL0_DAT);
+ hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL1_DAT);
+ hdmi_write(hdmi, 0x0060, HDMI_DFLT_CHL2_DAT);
+
+ /* Disable/unprepare hdmi clock */
+ clk_disable_unprepare(hdmi->clk_phy);
+ clk_disable_unprepare(hdmi->clk_tmds);
+ clk_disable_unprepare(hdmi->clk_pix);
+
+ hdmi->enabled = false;
+}
+
+static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
+{
+ struct sti_hdmi *hdmi = bridge->driver_private;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (hdmi->enabled)
+ return;
+
+ /* Prepare/enable clocks */
+ if (clk_prepare_enable(hdmi->clk_pix))
+ DRM_ERROR("Failed to prepare/enable hdmi_pix clk\n");
+ if (clk_prepare_enable(hdmi->clk_tmds))
+ DRM_ERROR("Failed to prepare/enable hdmi_tmds clk\n");
+ if (clk_prepare_enable(hdmi->clk_phy))
+ DRM_ERROR("Failed to prepare/enable hdmi_rejec_pll clk\n");
+
+ hdmi->enabled = true;
+
+ /* Program hdmi serializer and start phy */
+ if (!hdmi->phy_ops->start(hdmi)) {
+ DRM_ERROR("Unable to start hdmi phy\n");
+ return;
+ }
+
+ /* Program hdmi active area */
+ hdmi_active_area(hdmi);
+
+ /* Enable working interrupts */
+ hdmi_write(hdmi, HDMI_WORKING_INT, HDMI_INT_EN);
+
+ /* Program hdmi config */
+ hdmi_config(hdmi);
+
+ /* Program AVI infoframe */
+ if (hdmi_avi_infoframe_config(hdmi))
+ DRM_ERROR("Unable to configure AVI infoframe\n");
+
+ /* Sw reset */
+ hdmi_swreset(hdmi);
+}
+
+static void sti_hdmi_set_mode(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sti_hdmi *hdmi = bridge->driver_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Copy the drm display mode in the connector local structure */
+ memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode));
+
+ /* Update clock framerate according to the selected mode */
+ ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000);
+ if (ret < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for hdmi_pix clk\n",
+ mode->clock * 1000);
+ return;
+ }
+ ret = clk_set_rate(hdmi->clk_phy, mode->clock * 1000);
+ if (ret < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for hdmi_rejection_pll clk\n",
+ mode->clock * 1000);
+ return;
+ }
+}
+
+static void sti_hdmi_bridge_nope(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static void sti_hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+ drm_bridge_cleanup(bridge);
+ kfree(bridge);
+}
+
+static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
+ .pre_enable = sti_hdmi_pre_enable,
+ .enable = sti_hdmi_bridge_nope,
+ .disable = sti_hdmi_disable,
+ .post_disable = sti_hdmi_bridge_nope,
+ .mode_set = sti_hdmi_set_mode,
+	.destroy = sti_hdmi_bridge_destroy,
+};
+
+static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct i2c_adapter *i2c_adap;
+ struct edid *edid;
+ int count;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ i2c_adap = i2c_get_adapter(1);
+ if (!i2c_adap)
+ goto fail;
+
+ edid = drm_get_edid(connector, i2c_adap);
+ if (!edid)
+ goto fail;
+
+ count = drm_add_edid_modes(connector, edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ kfree(edid);
+ return count;
+
+fail:
+	DRM_ERROR("Cannot read HDMI EDID\n");
+ return 0;
+}
+
+#define CLK_TOLERANCE_HZ 50
+
+static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+ int target_max = target + CLK_TOLERANCE_HZ;
+ int result;
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+
+ result = clk_round_rate(hdmi->clk_pix, target);
+
+ DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
+ target, result);
+
+ if ((result < target_min) || (result > target_max)) {
+ DRM_DEBUG_DRIVER("hdmi pixclk=%d not supported\n", target);
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+sti_hdmi_best_encoder(struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+
+ /* Best encoder is the one associated during connector creation */
+ return hdmi_connector->encoder;
+}
+
+static struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
+ .get_modes = sti_hdmi_connector_get_modes,
+ .mode_valid = sti_hdmi_connector_mode_valid,
+ .best_encoder = sti_hdmi_best_encoder,
+};
+
+/* get detection status of display device */
+static enum drm_connector_status
+sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (hdmi->hpd) {
+ DRM_DEBUG_DRIVER("hdmi cable connected\n");
+ return connector_status_connected;
+ }
+
+ DRM_DEBUG_DRIVER("hdmi cable disconnected\n");
+ return connector_status_disconnected;
+}
+
+static void sti_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(hdmi_connector);
+}
+
+static struct drm_connector_funcs sti_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = sti_hdmi_connector_detect,
+ .destroy = sti_hdmi_connector_destroy,
+};
+
+static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder;
+ struct sti_hdmi_connector *connector;
+ struct drm_connector *drm_connector;
+ struct drm_bridge *bridge;
+ struct i2c_adapter *i2c_adap;
+ int err;
+
+ i2c_adap = i2c_get_adapter(1);
+ if (!i2c_adap)
+ return -EPROBE_DEFER;
+
+ /* Set the drm device handle */
+ hdmi->drm_dev = drm_dev;
+
+ encoder = sti_hdmi_find_encoder(drm_dev);
+ if (!encoder)
+ return -ENOMEM;
+
+ connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ connector->hdmi = hdmi;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver_private = hdmi;
+ drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs);
+
+ encoder->bridge = bridge;
+ connector->encoder = encoder;
+
+ drm_connector = (struct drm_connector *)connector;
+
+ drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm_dev, drm_connector,
+ &sti_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(drm_connector,
+ &sti_hdmi_connector_helper_funcs);
+
+ err = drm_connector_register(drm_connector);
+ if (err)
+ goto err_connector;
+
+ err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ if (err) {
+		DRM_ERROR("Failed to attach a connector to an encoder\n");
+ goto err_sysfs;
+ }
+
+ /* Enable default interrupts */
+ hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
+
+ return 0;
+
+err_sysfs:
+ drm_connector_unregister(drm_connector);
+err_connector:
+ drm_bridge_cleanup(bridge);
+ drm_connector_cleanup(drm_connector);
+ return -EINVAL;
+}
+
+static void sti_hdmi_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_hdmi_ops = {
+ .bind = sti_hdmi_bind,
+ .unbind = sti_hdmi_unbind,
+};
+
+static const struct of_device_id hdmi_of_match[] = {
+ {
+ .compatible = "st,stih416-hdmi",
+ .data = &tx3g0c55phy_ops,
+ }, {
+ .compatible = "st,stih407-hdmi",
+ .data = &tx3g4c28phy_ops,
+ }, {
+ /* end node */
+ }
+};
+MODULE_DEVICE_TABLE(of, hdmi_of_match);
+
+static int sti_hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sti_hdmi *hdmi;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ int ret;
+
+ DRM_INFO("%s\n", __func__);
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = pdev->dev;
+
+ /* Get resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
+ if (!res) {
+ DRM_ERROR("Invalid hdmi resource\n");
+ return -ENOMEM;
+ }
+ hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (!hdmi->regs)
+		return -ENOMEM;
+
+ if (of_device_is_compatible(np, "st,stih416-hdmi")) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "syscfg");
+ if (!res) {
+ DRM_ERROR("Invalid syscfg resource\n");
+ return -ENOMEM;
+ }
+ hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+		if (!hdmi->syscfg)
+			return -ENOMEM;
+ }
+
+ hdmi->phy_ops = (struct hdmi_phy_ops *)
+ of_match_node(hdmi_of_match, np)->data;
+
+ /* Get clock resources */
+ hdmi->clk_pix = devm_clk_get(dev, "pix");
+ if (IS_ERR(hdmi->clk_pix)) {
+ DRM_ERROR("Cannot get hdmi_pix clock\n");
+ return PTR_ERR(hdmi->clk_pix);
+ }
+
+ hdmi->clk_tmds = devm_clk_get(dev, "tmds");
+ if (IS_ERR(hdmi->clk_tmds)) {
+ DRM_ERROR("Cannot get hdmi_tmds clock\n");
+ return PTR_ERR(hdmi->clk_tmds);
+ }
+
+ hdmi->clk_phy = devm_clk_get(dev, "phy");
+ if (IS_ERR(hdmi->clk_phy)) {
+ DRM_ERROR("Cannot get hdmi_phy clock\n");
+ return PTR_ERR(hdmi->clk_phy);
+ }
+
+ hdmi->clk_audio = devm_clk_get(dev, "audio");
+ if (IS_ERR(hdmi->clk_audio)) {
+ DRM_ERROR("Cannot get hdmi_audio clock\n");
+ return PTR_ERR(hdmi->clk_audio);
+ }
+
+ hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0);
+ if (hdmi->hpd_gpio < 0) {
+ DRM_ERROR("Failed to get hdmi hpd-gpio\n");
+ return -EIO;
+ }
+
+ hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+
+ init_waitqueue_head(&hdmi->wait_event);
+
+	hdmi->irq = platform_get_irq_byname(pdev, "irq");
+	if (hdmi->irq < 0) {
+		DRM_ERROR("Failed to get HDMI irq\n");
+		return hdmi->irq;
+	}
+
+ ret = devm_request_threaded_irq(dev, hdmi->irq, hdmi_irq,
+ hdmi_irq_thread, IRQF_ONESHOT, dev_name(dev), hdmi);
+ if (ret) {
+ DRM_ERROR("Failed to register HDMI interrupt\n");
+ return ret;
+ }
+
+ hdmi->reset = devm_reset_control_get(dev, "hdmi");
+ /* Take hdmi out of reset */
+ if (!IS_ERR(hdmi->reset))
+ reset_control_deassert(hdmi->reset);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return component_add(&pdev->dev, &sti_hdmi_ops);
+}
+
+static int sti_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_hdmi_ops);
+ return 0;
+}
+
+struct platform_driver sti_hdmi_driver = {
+ .driver = {
+ .name = "sti-hdmi",
+ .owner = THIS_MODULE,
+ .of_match_table = hdmi_of_match,
+ },
+ .probe = sti_hdmi_probe,
+ .remove = sti_hdmi_remove,
+};
+
+module_platform_driver(sti_hdmi_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
new file mode 100644
index 000000000000..61bec6557ceb
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HDMI_H_
+#define _STI_HDMI_H_
+
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+
+#define HDMI_STA 0x0010
+#define HDMI_STA_DLL_LCK BIT(5)
+
+struct sti_hdmi;
+
+struct hdmi_phy_ops {
+ bool (*start)(struct sti_hdmi *hdmi);
+ void (*stop)(struct sti_hdmi *hdmi);
+};
+
+/**
+ * STI hdmi structure
+ *
+ * @dev: driver device
+ * @drm_dev: pointer to drm device
+ * @mode: current display mode selected
+ * @regs: hdmi register
+ * @syscfg: syscfg register for pll rejection configuration
+ * @clk_pix: hdmi pixel clock
+ * @clk_tmds: hdmi tmds clock
+ * @clk_phy: hdmi phy clock
+ * @clk_audio: hdmi audio clock
+ * @irq: hdmi interrupt number
+ * @irq_status: interrupt status register
+ * @phy_ops: phy start/stop operations
+ * @enabled: true if hdmi is enabled else false
+ * @hpd_gpio: hdmi hot plug detect gpio number
+ * @hpd: hot plug detect status
+ * @wait_event: wait event
+ * @event_received: wait event status
+ * @reset: reset control of the hdmi phy
+ */
+struct sti_hdmi {
+ struct device dev;
+ struct drm_device *drm_dev;
+ struct drm_display_mode mode;
+ void __iomem *regs;
+ void __iomem *syscfg;
+ struct clk *clk_pix;
+ struct clk *clk_tmds;
+ struct clk *clk_phy;
+ struct clk *clk_audio;
+ int irq;
+ u32 irq_status;
+ struct hdmi_phy_ops *phy_ops;
+ bool enabled;
+ int hpd_gpio;
+ bool hpd;
+ wait_queue_head_t wait_event;
+ bool event_received;
+ struct reset_control *reset;
+};
+
+u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
+void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset);
+
+/**
+ * hdmi phy config structure
+ *
+ * A pointer to an array of these structures is passed to a TMDS (HDMI) output
+ * via the control interface to provide board and SoC specific
+ * configurations of the HDMI PHY. Each entry in the array specifies a hardware
+ * specific configuration for a given TMDS clock frequency range.
+ *
+ * @min_tmds_freq: Lower bound of TMDS clock frequency this entry applies to
+ * @max_tmds_freq: Upper bound of TMDS clock frequency this entry applies to
+ * @config: SoC specific register configuration
+ */
+struct hdmi_phy_config {
+ u32 min_tmds_freq;
+ u32 max_tmds_freq;
+ u32 config[4];
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
new file mode 100644
index 000000000000..49ae8e44b285
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_hdmi_tx3g0c55phy.h"
+
+#define HDMI_SRZ_PLL_CFG 0x0504
+#define HDMI_SRZ_TAP_1 0x0508
+#define HDMI_SRZ_TAP_2 0x050C
+#define HDMI_SRZ_TAP_3 0x0510
+#define HDMI_SRZ_CTRL 0x0514
+
+#define HDMI_SRZ_PLL_CFG_POWER_DOWN BIT(0)
+#define HDMI_SRZ_PLL_CFG_VCOR_SHIFT 1
+#define HDMI_SRZ_PLL_CFG_VCOR_425MHZ 0
+#define HDMI_SRZ_PLL_CFG_VCOR_850MHZ 1
+#define HDMI_SRZ_PLL_CFG_VCOR_1700MHZ 2
+#define HDMI_SRZ_PLL_CFG_VCOR_3000MHZ 3
+#define HDMI_SRZ_PLL_CFG_VCOR_MASK 3
+#define HDMI_SRZ_PLL_CFG_VCOR(x) ((x) << HDMI_SRZ_PLL_CFG_VCOR_SHIFT)
+#define HDMI_SRZ_PLL_CFG_NDIV_SHIFT 8
+#define HDMI_SRZ_PLL_CFG_NDIV_MASK (0x1F << HDMI_SRZ_PLL_CFG_NDIV_SHIFT)
+#define HDMI_SRZ_PLL_CFG_MODE_SHIFT 16
+#define HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ 0x1
+#define HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ 0x4
+#define HDMI_SRZ_PLL_CFG_MODE_27_MHZ 0x5
+#define HDMI_SRZ_PLL_CFG_MODE_33_75_MHZ 0x6
+#define HDMI_SRZ_PLL_CFG_MODE_40_5_MHZ 0x7
+#define HDMI_SRZ_PLL_CFG_MODE_54_MHZ 0x8
+#define HDMI_SRZ_PLL_CFG_MODE_67_5_MHZ 0x9
+#define HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ 0xA
+#define HDMI_SRZ_PLL_CFG_MODE_81_MHZ 0xB
+#define HDMI_SRZ_PLL_CFG_MODE_82_5_MHZ 0xC
+#define HDMI_SRZ_PLL_CFG_MODE_108_MHZ 0xD
+#define HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ 0xE
+#define HDMI_SRZ_PLL_CFG_MODE_165_MHZ 0xF
+#define HDMI_SRZ_PLL_CFG_MODE_MASK 0xF
+#define HDMI_SRZ_PLL_CFG_MODE(x) ((x) << HDMI_SRZ_PLL_CFG_MODE_SHIFT)
+
+#define HDMI_SRZ_CTRL_POWER_DOWN (1 << 0)
+#define HDMI_SRZ_CTRL_EXTERNAL_DATA_EN (1 << 1)
+
+/* sysconf registers */
+#define HDMI_REJECTION_PLL_CONFIGURATION 0x0858 /* SYSTEM_CONFIG2534 */
+#define HDMI_REJECTION_PLL_STATUS 0x0948 /* SYSTEM_CONFIG2594 */
+
+#define REJECTION_PLL_HDMI_ENABLE_SHIFT 0
+#define REJECTION_PLL_HDMI_ENABLE_MASK (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT)
+#define REJECTION_PLL_HDMI_PDIV_SHIFT 24
+#define REJECTION_PLL_HDMI_PDIV_MASK (0x7 << REJECTION_PLL_HDMI_PDIV_SHIFT)
+#define REJECTION_PLL_HDMI_NDIV_SHIFT 16
+#define REJECTION_PLL_HDMI_NDIV_MASK (0xFF << REJECTION_PLL_HDMI_NDIV_SHIFT)
+#define REJECTION_PLL_HDMI_MDIV_SHIFT 8
+#define REJECTION_PLL_HDMI_MDIV_MASK (0xFF << REJECTION_PLL_HDMI_MDIV_SHIFT)
+
+#define REJECTION_PLL_HDMI_REJ_PLL_LOCK BIT(0)
+
+#define HDMI_TIMEOUT_PLL_LOCK 50 /* milliseconds */
+
+/**
+ * pll mode structure
+ *
+ * Each entry of the pllmodes[] table below maps a TMDS clock frequency
+ * range to the SoC-specific serializer PLL mode value programmed into
+ * HDMI_SRZ_PLL_CFG. The table holds NB_PLL_MODE entries and is walked by
+ * index, so it is not zero-terminated.
+ *
+ * @min: Lower bound of TMDS clock frequency this entry applies to
+ * @max: Upper bound of TMDS clock frequency this entry applies to
+ * @mode: SoC specific register configuration
+ */
+struct pllmode {
+ u32 min;
+ u32 max;
+ u32 mode;
+};
+
+#define NB_PLL_MODE 7
+static struct pllmode pllmodes[NB_PLL_MODE] = {
+ {13500000, 13513500, HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ},
+ {25174800, 25200000, HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ},
+ {27000000, 27027000, HDMI_SRZ_PLL_CFG_MODE_27_MHZ},
+ {54000000, 54054000, HDMI_SRZ_PLL_CFG_MODE_54_MHZ},
+ {72000000, 74250000, HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ},
+ {108000000, 108108000, HDMI_SRZ_PLL_CFG_MODE_108_MHZ},
+ {148351648, 297000000, HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ}
+};
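+/*
+ * Example: a 1080p60 pixel clock of 148.5 MHz falls in the last range above
+ * (148351648..297000000 Hz), so HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ is selected.
+ * A clock outside every range keeps the generic mode 0, which only supports
+ * a 10x multiplier (24-bit color), as noted in sti_hdmi_tx3g0c55phy_start().
+ */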
+
+#define NB_HDMI_PHY_CONFIG 5
+static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
+ {0, 40000000, {0x00101010, 0x00101010, 0x00101010, 0x02} },
+ {40000000, 140000000, {0x00111111, 0x00111111, 0x00111111, 0x02} },
+ {140000000, 160000000, {0x00131313, 0x00101010, 0x00101010, 0x02} },
+ {160000000, 250000000, {0x00131313, 0x00111111, 0x00111111, 0x03FE} },
+ {250000000, 300000000, {0x00151515, 0x00101010, 0x00101010, 0x03FE} },
+};
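+/*
+ * Example: a 148.5 MHz TMDS clock matches the third row above
+ * (140..160 MHz), so taps 0x00131313/0x00101010/0x00101010 and the
+ * HDMI_SRZ_CTRL value 0x02 are programmed by the lookup loop in
+ * sti_hdmi_tx3g0c55phy_start().
+ */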
+
+#define PLL_CHANGE_DELAY 1 /* ms */
+
+/**
+ * Disable the pll rejection
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * return true if the pll has been disabled
+ */
+static bool disable_pll_rejection(struct sti_hdmi *hdmi)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+ val &= ~REJECTION_PLL_HDMI_ENABLE_MASK;
+ writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+
+ msleep(PLL_CHANGE_DELAY);
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
+
+ return !(val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
+}
+
+/**
+ * Enable the old BCH/rejection PLL. It is now reused to provide the
+ * CLKPXPLL clock input to the new PHY PLL that generates the serializer
+ * clock (TMDS*10) and the TMDS clock, which is now fed back into the HDMI
+ * formatter instead of the TMDS clock line from ClockGenB.
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * return true if the pll has been correctly set
+ */
+static bool enable_pll_rejection(struct sti_hdmi *hdmi)
+{
+ unsigned int inputclock;
+ u32 mdiv, ndiv, pdiv, val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!disable_pll_rejection(hdmi))
+ return false;
+
+ inputclock = hdmi->mode.clock * 1000;
+
+ DRM_DEBUG_DRIVER("hdmi rejection pll input clock = %dHz\n", inputclock);
+
+
+ /* Power up the HDMI rejection PLL
+ * Note: On this SoC (stiH416) we are forced to have the input clock
+ * be equal to the HDMI pixel clock.
+ *
+ * The values here have been suggested by validation however they are
+ * still provisional and subject to change.
+ *
+ * PLLout = (Fin*Mdiv) / ((2 * Ndiv) / 2^Pdiv)
+ */
+ if (inputclock < 50000000) {
+ /*
+ * For slower clocks we need to multiply more to keep the
+ * internal VCO frequency within the physical specification
+ * of the PLL.
+ */
+ pdiv = 4;
+ ndiv = 240;
+ mdiv = 30;
+ } else {
+ pdiv = 2;
+ ndiv = 60;
+ mdiv = 30;
+ }
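+	/*
+	 * Worked example with the formula above: for a 74.25 MHz input,
+	 * pdiv = 2, ndiv = 60, mdiv = 30 gives
+	 * PLLout = (74.25 MHz * 30) / ((2 * 60) / 2^2) = 74.25 MHz;
+	 * for a 27 MHz input, pdiv = 4, ndiv = 240, mdiv = 30 gives
+	 * PLLout = (27 MHz * 30) / ((2 * 240) / 2^4) = 27 MHz.
+	 * Either way the rejection PLL output tracks the pixel clock, while
+	 * the larger ndiv keeps the VCO in range for slow input clocks.
+	 */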
+
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+
+ val &= ~(REJECTION_PLL_HDMI_PDIV_MASK |
+ REJECTION_PLL_HDMI_NDIV_MASK |
+ REJECTION_PLL_HDMI_MDIV_MASK |
+ REJECTION_PLL_HDMI_ENABLE_MASK);
+
+ val |= (pdiv << REJECTION_PLL_HDMI_PDIV_SHIFT) |
+ (ndiv << REJECTION_PLL_HDMI_NDIV_SHIFT) |
+ (mdiv << REJECTION_PLL_HDMI_MDIV_SHIFT) |
+ (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT);
+
+ writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+
+ msleep(PLL_CHANGE_DELAY);
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
+
+ return (val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
+}
+
+/**
+ * Start hdmi phy macro cell tx3g0c55
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return false if an error occurs
+ */
+static bool sti_hdmi_tx3g0c55phy_start(struct sti_hdmi *hdmi)
+{
+ u32 ckpxpll = hdmi->mode.clock * 1000;
+ u32 val, tmdsck, freqvco, pllctrl = 0;
+ unsigned int i;
+
+ if (!enable_pll_rejection(hdmi))
+ return false;
+
+ DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
+
+ /* Assuming no pixel repetition and 24bits color */
+ tmdsck = ckpxpll;
+ pllctrl = 2 << HDMI_SRZ_PLL_CFG_NDIV_SHIFT;
+
+ /*
+ * Setup the PLL mode parameter based on the ckpxpll. If we haven't got
+ * a clock frequency supported by one of the specific PLL modes then we
+ * will end up using the generic mode (0) which only supports a 10x
+ * multiplier, hence only 24bit color.
+ */
+ for (i = 0; i < NB_PLL_MODE; i++) {
+ if (ckpxpll >= pllmodes[i].min && ckpxpll <= pllmodes[i].max)
+ pllctrl |= HDMI_SRZ_PLL_CFG_MODE(pllmodes[i].mode);
+ }
+
+ freqvco = tmdsck * 10;
+ if (freqvco <= 425000000UL)
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_425MHZ);
+ else if (freqvco <= 850000000UL)
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_850MHZ);
+ else if (freqvco <= 1700000000UL)
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_1700MHZ);
+ else if (freqvco <= 2970000000UL)
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_3000MHZ);
+ else {
+ DRM_ERROR("PHY serializer clock out of range\n");
+ goto err;
+ }
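+	/*
+	 * Example: for a 148.5 MHz TMDS clock the serializer VCO runs at
+	 * 1.485 GHz, which selects HDMI_SRZ_PLL_CFG_VCOR_1700MHZ above.
+	 */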
+
+ /*
+ * Configure and power up the PHY PLL
+ */
+ hdmi->event_received = false;
+ DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
+ hdmi_write(hdmi, pllctrl, HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received == true,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_PLL_LOCK));
+
+ if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
+ DRM_ERROR("hdmi phy pll not locked\n");
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
+
+ /*
+ * To configure the source termination and pre-emphasis appropriately
+ * for different high speed TMDS clock frequencies a phy configuration
+ * table must be provided, tailored to the SoC and board combination.
+ */
+ for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
+ if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
+ (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
+ val = hdmiphy_config[i].config[0];
+ hdmi_write(hdmi, val, HDMI_SRZ_TAP_1);
+ val = hdmiphy_config[i].config[1];
+ hdmi_write(hdmi, val, HDMI_SRZ_TAP_2);
+ val = hdmiphy_config[i].config[2];
+ hdmi_write(hdmi, val, HDMI_SRZ_TAP_3);
+ val = hdmiphy_config[i].config[3];
+ val |= HDMI_SRZ_CTRL_EXTERNAL_DATA_EN;
+ val &= ~HDMI_SRZ_CTRL_POWER_DOWN;
+ hdmi_write(hdmi, val, HDMI_SRZ_CTRL);
+
+ DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x 0x%x\n",
+ hdmiphy_config[i].config[0],
+ hdmiphy_config[i].config[1],
+ hdmiphy_config[i].config[2],
+ hdmiphy_config[i].config[3]);
+ return true;
+ }
+ }
+
+ /*
+	 * By default, power up the serializer with no pre-emphasis or source
+ * termination.
+ */
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_1);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_2);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_3);
+ hdmi_write(hdmi, HDMI_SRZ_CTRL_EXTERNAL_DATA_EN, HDMI_SRZ_CTRL);
+
+ return true;
+
+err:
+ disable_pll_rejection(hdmi);
+
+ return false;
+}
+
+/**
+ * Stop hdmi phy macro cell tx3g0c55
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void sti_hdmi_tx3g0c55phy_stop(struct sti_hdmi *hdmi)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ hdmi->event_received = false;
+
+ hdmi_write(hdmi, HDMI_SRZ_CTRL_POWER_DOWN, HDMI_SRZ_CTRL);
+ hdmi_write(hdmi, HDMI_SRZ_PLL_CFG_POWER_DOWN, HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received == true,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_PLL_LOCK));
+
+ if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
+		DRM_ERROR("hdmi phy pll not properly disabled\n");
+
+ disable_pll_rejection(hdmi);
+}
+
+struct hdmi_phy_ops tx3g0c55phy_ops = {
+ .start = sti_hdmi_tx3g0c55phy_start,
+ .stop = sti_hdmi_tx3g0c55phy_stop,
+};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
new file mode 100644
index 000000000000..068237b3a303
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HDMI_TX3G0C55PHY_H_
+#define _STI_HDMI_TX3G0C55PHY_H_
+
+#include "sti_hdmi.h"
+
+extern struct hdmi_phy_ops tx3g0c55phy_ops;
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
new file mode 100644
index 000000000000..8e0ceb0ced33
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_hdmi_tx3g4c28phy.h"
+
+#define HDMI_SRZ_CFG 0x504
+#define HDMI_SRZ_PLL_CFG 0x510
+#define HDMI_SRZ_ICNTL 0x518
+#define HDMI_SRZ_CALCODE_EXT 0x520
+
+#define HDMI_SRZ_CFG_EN BIT(0)
+#define HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT BIT(1)
+#define HDMI_SRZ_CFG_EXTERNAL_DATA BIT(16)
+#define HDMI_SRZ_CFG_RBIAS_EXT BIT(17)
+#define HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION BIT(18)
+#define HDMI_SRZ_CFG_EN_BIASRES_DETECTION BIT(19)
+#define HDMI_SRZ_CFG_EN_SRC_TERMINATION BIT(24)
+
+#define HDMI_SRZ_CFG_INTERNAL_MASK (HDMI_SRZ_CFG_EN | \
+ HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT | \
+ HDMI_SRZ_CFG_EXTERNAL_DATA | \
+ HDMI_SRZ_CFG_RBIAS_EXT | \
+ HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION | \
+ HDMI_SRZ_CFG_EN_BIASRES_DETECTION | \
+ HDMI_SRZ_CFG_EN_SRC_TERMINATION)
+
+#define PLL_CFG_EN BIT(0)
+#define PLL_CFG_NDIV_SHIFT (8)
+#define PLL_CFG_IDF_SHIFT (16)
+#define PLL_CFG_ODF_SHIFT (24)
+
+#define ODF_DIV_1 (0)
+#define ODF_DIV_2 (1)
+#define ODF_DIV_4 (2)
+#define ODF_DIV_8 (3)
+
+#define HDMI_TIMEOUT_PLL_LOCK 50 /* milliseconds */
+
+struct plldividers_s {
+ uint32_t min;
+ uint32_t max;
+ uint32_t idf;
+ uint32_t odf;
+};
+
+/*
+ * Functional specification recommended values
+ */
+#define NB_PLL_MODE 5
+static struct plldividers_s plldividers[NB_PLL_MODE] = {
+ {0, 20000000, 1, ODF_DIV_8},
+ {20000000, 42500000, 2, ODF_DIV_8},
+ {42500000, 85000000, 4, ODF_DIV_4},
+ {85000000, 170000000, 8, ODF_DIV_2},
+ {170000000, 340000000, 16, ODF_DIV_1}
+};
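
For readers tracing the register value, here is a worked example of how the start function below uses this table. The pixel clock figure is an assumption chosen for illustration (a typical 1080p60 clock), not a value taken from this patch:

/*
 * Assumed example: ckpxpll = 148500000 Hz (1080p60). The lookup selects
 * the 85-170 MHz row, so idf = 8 and odf = ODF_DIV_2, and with the fixed
 * ndiv of 40 used by sti_hdmi_tx3g4c28phy_start():
 *   pllctrl = (40 << PLL_CFG_NDIV_SHIFT)        -> 0x00002800
 *           | (8  << PLL_CFG_IDF_SHIFT)         -> 0x00080000
 *           | (ODF_DIV_2 << PLL_CFG_ODF_SHIFT)  -> 0x01000000
 *           = 0x01082800, OR-ed with PLL_CFG_EN when written to
 *             HDMI_SRZ_PLL_CFG.
 */
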
+
+#define NB_HDMI_PHY_CONFIG 2
+static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
+ {0, 250000000, {0x0, 0x0, 0x0, 0x0} },
+ {250000000, 300000000, {0x1110, 0x0, 0x0, 0x0} },
+};
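
The loop in the start function below writes config[0..2] to HDMI_SRZ_CFG, HDMI_SRZ_ICNTL and HDMI_SRZ_CALCODE_EXT for the first row whose TMDS range matches. A board needing specific pre-emphasis at high TMDS clocks would extend this table; the sketch below is purely illustrative and its register values are placeholders, not validated settings:

/* Hypothetical board-specific table; values are placeholders only. */
static struct hdmi_phy_config hdmiphy_config_myboard[] = {
        {0,         250000000, {0x0,    0x0, 0x0, 0x0} },
        {250000000, 300000000, {0x1110, 0x0, 0x0, 0x0} },
        {300000000, 340000000, {0x1120, 0x2, 0x1, 0x0} },  /* assumed */
};
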
+
+/**
+ * Start hdmi phy macro cell tx3g4c28
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return false if an error occurs
+ */
+static bool sti_hdmi_tx3g4c28phy_start(struct sti_hdmi *hdmi)
+{
+ u32 ckpxpll = hdmi->mode.clock * 1000;
+ u32 val, tmdsck, idf, odf, pllctrl = 0;
+ bool foundplldivides = false;
+ int i;
+
+ DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
+
+ for (i = 0; i < NB_PLL_MODE; i++) {
+ if (ckpxpll >= plldividers[i].min &&
+ ckpxpll < plldividers[i].max) {
+ idf = plldividers[i].idf;
+ odf = plldividers[i].odf;
+ foundplldivides = true;
+ break;
+ }
+ }
+
+ if (!foundplldivides) {
+ DRM_ERROR("input TMDS clock speed (%d) not supported\n",
+ ckpxpll);
+ goto err;
+ }
+
+ /* Assuming no pixel repetition and 24-bit color depth */
+ tmdsck = ckpxpll;
+ pllctrl |= 40 << PLL_CFG_NDIV_SHIFT;
+
+ if (tmdsck > 340000000) {
+ DRM_ERROR("output TMDS clock (%d) out of range\n", tmdsck);
+ goto err;
+ }
+
+ pllctrl |= idf << PLL_CFG_IDF_SHIFT;
+ pllctrl |= odf << PLL_CFG_ODF_SHIFT;
+
+ /*
+ * Configure and power up the PHY PLL
+ */
+ hdmi->event_received = false;
+ DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
+ hdmi_write(hdmi, (pllctrl | PLL_CFG_EN), HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received == true,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_PLL_LOCK));
+
+ if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
+ DRM_ERROR("hdmi phy pll not locked\n");
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
+
+ val = (HDMI_SRZ_CFG_EN |
+ HDMI_SRZ_CFG_EXTERNAL_DATA |
+ HDMI_SRZ_CFG_EN_BIASRES_DETECTION |
+ HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION);
+
+ if (tmdsck > 165000000)
+ val |= HDMI_SRZ_CFG_EN_SRC_TERMINATION;
+
+ /*
+ * To configure the source termination and pre-emphasis appropriately
+ * for different high speed TMDS clock frequencies a phy configuration
+ * table must be provided, tailored to the SoC and board combination.
+ */
+ for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
+ if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
+ (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
+ val |= (hdmiphy_config[i].config[0]
+ & ~HDMI_SRZ_CFG_INTERNAL_MASK);
+ hdmi_write(hdmi, val, HDMI_SRZ_CFG);
+
+ val = hdmiphy_config[i].config[1];
+ hdmi_write(hdmi, val, HDMI_SRZ_ICNTL);
+
+ val = hdmiphy_config[i].config[2];
+ hdmi_write(hdmi, val, HDMI_SRZ_CALCODE_EXT);
+
+ DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x\n",
+ hdmiphy_config[i].config[0],
+ hdmiphy_config[i].config[1],
+ hdmiphy_config[i].config[2]);
+ return true;
+ }
+ }
+
+ /*
+ * Default, power up the serializer with no pre-emphasis or
+ * output swing correction
+ */
+ hdmi_write(hdmi, val, HDMI_SRZ_CFG);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_ICNTL);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_CALCODE_EXT);
+
+ return true;
+
+err:
+ return false;
+}
+
+/**
+ * Stop hdmi phy macro cell tx3g4c28
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void sti_hdmi_tx3g4c28phy_stop(struct sti_hdmi *hdmi)
+{
+ int val = 0;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hdmi->event_received = false;
+
+ val = HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION;
+ val |= HDMI_SRZ_CFG_EN_BIASRES_DETECTION;
+
+ hdmi_write(hdmi, val, HDMI_SRZ_CFG);
+ hdmi_write(hdmi, 0, HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received == true,
+ msecs_to_jiffies
+ (HDMI_TIMEOUT_PLL_LOCK));
+
+ if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
+ DRM_ERROR("hdmi phy pll not well disabled\n");
+}
+
+struct hdmi_phy_ops tx3g4c28phy_ops = {
+ .start = sti_hdmi_tx3g4c28phy_start,
+ .stop = sti_hdmi_tx3g4c28phy_stop,
+};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
new file mode 100644
index 000000000000..f99a7ff281ef
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HDMI_TX3G4C28PHY_H_
+#define _STI_HDMI_TX3G4C28PHY_H_
+
+#include "sti_hdmi.h"
+
+extern struct hdmi_phy_ops tx3g4c28phy_ops;
+
+#endif
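
Both serializer macrocells expose the same hdmi_phy_ops interface (start/stop), so the HDMI core can pick one at probe time. The helper below is a hypothetical sketch, not part of this patch: sti_hdmi.c is not shown in this excerpt and the compatible strings are assumptions.

#include <linux/of.h>

#include "sti_hdmi_tx3g0c55phy.h"
#include "sti_hdmi_tx3g4c28phy.h"

/* Hypothetical selection helper; the compatible strings are assumed. */
static struct hdmi_phy_ops *sti_hdmi_pick_phy(struct device_node *np)
{
        if (of_device_is_compatible(np, "st,stih416-hdmi"))
                return &tx3g0c55phy_ops;        /* assumed SoC mapping */

        return &tx3g4c28phy_ops;
}
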
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
new file mode 100644
index 000000000000..06a587c4f1bb
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_layer.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_gdp.h"
+#include "sti_layer.h"
+#include "sti_vid.h"
+
+const char *sti_layer_to_str(struct sti_layer *layer)
+{
+ switch (layer->desc) {
+ case STI_GDP_0:
+ return "GDP0";
+ case STI_GDP_1:
+ return "GDP1";
+ case STI_GDP_2:
+ return "GDP2";
+ case STI_GDP_3:
+ return "GDP3";
+ case STI_VID_0:
+ return "VID0";
+ case STI_VID_1:
+ return "VID1";
+ case STI_CURSOR:
+ return "CURSOR";
+ default:
+ return "<UNKNOWN LAYER>";
+ }
+}
+
+struct sti_layer *sti_layer_create(struct device *dev, int desc,
+ void __iomem *baseaddr)
+{
+
+ struct sti_layer *layer = NULL;
+
+ switch (desc & STI_LAYER_TYPE_MASK) {
+ case STI_GDP:
+ layer = sti_gdp_create(dev, desc);
+ break;
+ case STI_VID:
+ layer = sti_vid_create(dev);
+ break;
+ }
+
+ if (!layer) {
+ DRM_ERROR("Failed to create layer\n");
+ return NULL;
+ }
+
+ layer->desc = desc;
+ layer->dev = dev;
+ layer->regs = baseaddr;
+
+ layer->ops->init(layer);
+
+ DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
+
+ return layer;
+}
+
+int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+ struct drm_display_mode *mode, int mixer_id,
+ int dest_x, int dest_y, int dest_w, int dest_h,
+ int src_x, int src_y, int src_w, int src_h)
+{
+ int ret;
+ unsigned int i;
+ struct drm_gem_cma_object *cma_obj;
+
+ if (!layer || !fb || !mode) {
+ DRM_ERROR("Null fb, layer or mode\n");
+ return 1;
+ }
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return 1;
+ }
+
+ layer->fb = fb;
+ layer->mode = mode;
+ layer->mixer_id = mixer_id;
+ layer->dst_x = dest_x;
+ layer->dst_y = dest_y;
+ layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
+ layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
+ layer->src_x = src_x;
+ layer->src_y = src_y;
+ layer->src_w = src_w;
+ layer->src_h = src_h;
+ layer->format = fb->pixel_format;
+ layer->paddr = cma_obj->paddr;
+ for (i = 0; i < 4; i++) {
+ layer->pitches[i] = fb->pitches[i];
+ layer->offsets[i] = fb->offsets[i];
+ }
+
+ DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
+ sti_layer_to_str(layer),
+ layer->mixer_id);
+ DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+ sti_layer_to_str(layer),
+ layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
+ layer->src_w, layer->src_h, layer->src_x,
+ layer->src_y);
+
+ DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+ (char *)&layer->format, (unsigned long)layer->paddr);
+
+ if (!layer->ops->prepare)
+ goto err_no_prepare;
+
+ ret = layer->ops->prepare(layer, !layer->enabled);
+ if (!ret)
+ layer->enabled = true;
+
+ return ret;
+
+err_no_prepare:
+ DRM_ERROR("Cannot prepare\n");
+ return 1;
+}
+
+int sti_layer_commit(struct sti_layer *layer)
+{
+ if (!layer)
+ return 1;
+
+ if (!layer->ops->commit)
+ goto err_no_commit;
+
+ return layer->ops->commit(layer);
+
+err_no_commit:
+ DRM_ERROR("Cannot commit\n");
+ return 1;
+}
+
+int sti_layer_disable(struct sti_layer *layer)
+{
+ int ret;
+
+ DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+ if (!layer)
+ return 1;
+
+ if (!layer->enabled)
+ return 0;
+
+ if (!layer->ops->disable)
+ goto err_no_disable;
+
+ ret = layer->ops->disable(layer);
+ if (!ret)
+ layer->enabled = false;
+ else
+ DRM_ERROR("Disable failed\n");
+
+ return ret;
+
+err_no_disable:
+ DRM_ERROR("Cannot disable\n");
+ return 1;
+}
+
+const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
+{
+ if (!layer)
+ return NULL;
+
+ if (!layer->ops->get_formats)
+ return NULL;
+
+ return layer->ops->get_formats(layer);
+}
+
+unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
+{
+ if (!layer)
+ return 0;
+
+ if (!layer->ops->get_nb_formats)
+ return 0;
+
+ return layer->ops->get_nb_formats(layer);
+}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
new file mode 100644
index 000000000000..198c3774cc12
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_layer.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_LAYER_H_
+#define _STI_LAYER_H_
+
+#include <drm/drmP.h>
+
+#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
+
+#define STI_LAYER_TYPE_SHIFT 8
+#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
+
+struct sti_layer;
+
+enum sti_layer_type {
+ STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
+ STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
+ STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
+ STI_BCK = 4 << STI_LAYER_TYPE_SHIFT
+};
+
+enum sti_layer_id_of_type {
+ STI_ID_0 = 0,
+ STI_ID_1 = 1,
+ STI_ID_2 = 2,
+ STI_ID_3 = 3
+};
+
+enum sti_layer_desc {
+ STI_GDP_0 = STI_GDP | STI_ID_0,
+ STI_GDP_1 = STI_GDP | STI_ID_1,
+ STI_GDP_2 = STI_GDP | STI_ID_2,
+ STI_GDP_3 = STI_GDP | STI_ID_3,
+ STI_VID_0 = STI_VID | STI_ID_0,
+ STI_VID_1 = STI_VID | STI_ID_1,
+ STI_CURSOR = STI_CUR,
+ STI_BACK = STI_BCK
+};
+
+/**
+ * STI layer functions structure
+ *
+ * @get_formats: get layer supported formats
+ * @get_nb_formats: get number of formats supported
+ * @init: initialize the layer
+ * @prepare: prepare layer before rendering
+ * @commit: set layer for rendering
+ * @disable: disable layer
+ */
+struct sti_layer_funcs {
+ const uint32_t* (*get_formats)(struct sti_layer *layer);
+ unsigned int (*get_nb_formats)(struct sti_layer *layer);
+ void (*init)(struct sti_layer *layer);
+ int (*prepare)(struct sti_layer *layer, bool first_prepare);
+ int (*commit)(struct sti_layer *layer);
+ int (*disable)(struct sti_layer *layer);
+};
+
+/**
+ * STI layer structure
+ *
+ * @plane: drm plane it is bound to (if any)
+ * @fb: drm fb it is bound to
+ * @mode: display mode
+ * @desc: layer type & id
+ * @dev: driver device
+ * @regs: layer registers
+ * @ops: layer functions
+ * @zorder: layer z-order
+ * @mixer_id: id of the mixer used to display the layer
+ * @enabled: to know if the layer is active or not
+ * @src_x src_y: coordinates of the input (fb) area
+ * @src_w src_h: size of the input (fb) area
+ * @dst_x dst_y: coordinates of the output (crtc) area
+ * @dst_w dst_h: size of the output (crtc) area
+ * @format: format
+ * @pitches: pitch of 'planes' (eg: Y, U, V)
+ * @offsets: offset of 'planes'
+ * @paddr: physical address of the input buffer
+ */
+struct sti_layer {
+ struct drm_plane plane;
+ struct drm_framebuffer *fb;
+ struct drm_display_mode *mode;
+ enum sti_layer_desc desc;
+ struct device *dev;
+ void __iomem *regs;
+ const struct sti_layer_funcs *ops;
+ int zorder;
+ int mixer_id;
+ bool enabled;
+ int src_x, src_y;
+ int src_w, src_h;
+ int dst_x, dst_y;
+ int dst_w, dst_h;
+ uint32_t format;
+ unsigned int pitches[4];
+ unsigned int offsets[4];
+ dma_addr_t paddr;
+};
+
+struct sti_layer *sti_layer_create(struct device *dev, int desc,
+ void __iomem *baseaddr);
+int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ int mixer_id,
+ int dest_x, int dest_y,
+ int dest_w, int dest_h,
+ int src_x, int src_y,
+ int src_w, int src_h);
+int sti_layer_commit(struct sti_layer *layer);
+int sti_layer_disable(struct sti_layer *layer);
+const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
+unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
+const char *sti_layer_to_str(struct sti_layer *layer);
+
+#endif
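
A minimal, hypothetical usage sketch of the layer API declared above. The real callers are the compositor and DRM plane code, which are not part of this excerpt; the framebuffer, mode and mixer id are assumptions made for illustration only.

#include <drm/drmP.h>

#include "sti_layer.h"

/* Hypothetical caller: show a GDP layer full screen on mixer 0 (assumed
 * to be the main mixer id). */
static int example_show_layer(struct device *dev, void __iomem *gdp_regs,
                              struct drm_framebuffer *fb,
                              struct drm_display_mode *mode)
{
        struct sti_layer *layer;
        int ret;

        layer = sti_layer_create(dev, STI_GDP_0, gdp_regs);
        if (!layer)
                return -ENOMEM;

        /* source and destination rectangles both cover the whole mode */
        ret = sti_layer_prepare(layer, fb, mode, 0,
                                0, 0, mode->hdisplay, mode->vdisplay,
                                0, 0, mode->hdisplay, mode->vdisplay);
        if (ret)
                return ret;   /* prepare reports a non-zero value on error */

        return sti_layer_commit(layer);
}
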
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
new file mode 100644
index 000000000000..79f369db9fb6
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_compositor.h"
+#include "sti_mixer.h"
+#include "sti_vtg.h"
+
+/* Identity: G=Y , B=Cb , R=Cr */
+static const u32 mixerColorSpaceMatIdentity[] = {
+ 0x10000000, 0x00000000, 0x10000000, 0x00001000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+/* regs offset */
+#define GAM_MIXER_CTL 0x00
+#define GAM_MIXER_BKC 0x04
+#define GAM_MIXER_BCO 0x0C
+#define GAM_MIXER_BCS 0x10
+#define GAM_MIXER_AVO 0x28
+#define GAM_MIXER_AVS 0x2C
+#define GAM_MIXER_CRB 0x34
+#define GAM_MIXER_ACT 0x38
+#define GAM_MIXER_MBP 0x3C
+#define GAM_MIXER_MX0 0x80
+
+/* id for depth of CRB reg */
+#define GAM_DEPTH_VID0_ID 1
+#define GAM_DEPTH_VID1_ID 2
+#define GAM_DEPTH_GDP0_ID 3
+#define GAM_DEPTH_GDP1_ID 4
+#define GAM_DEPTH_GDP2_ID 5
+#define GAM_DEPTH_GDP3_ID 6
+#define GAM_DEPTH_MASK_ID 7
+
+/* mask in CTL reg */
+#define GAM_CTL_BACK_MASK BIT(0)
+#define GAM_CTL_VID0_MASK BIT(1)
+#define GAM_CTL_VID1_MASK BIT(2)
+#define GAM_CTL_GDP0_MASK BIT(3)
+#define GAM_CTL_GDP1_MASK BIT(4)
+#define GAM_CTL_GDP2_MASK BIT(5)
+#define GAM_CTL_GDP3_MASK BIT(6)
+
+const char *sti_mixer_to_str(struct sti_mixer *mixer)
+{
+ switch (mixer->id) {
+ case STI_MIXER_MAIN:
+ return "MAIN_MIXER";
+ case STI_MIXER_AUX:
+ return "AUX_MIXER";
+ default:
+ return "<UNKNOWN MIXER>";
+ }
+}
+
+static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
+{
+ return readl(mixer->regs + reg_id);
+}
+
+static inline void sti_mixer_reg_write(struct sti_mixer *mixer,
+ u32 reg_id, u32 val)
+{
+ writel(val, mixer->regs + reg_id);
+}
+
+void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
+{
+ u32 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
+
+ val &= ~GAM_CTL_BACK_MASK;
+ val |= enable;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
+}
+
+static void sti_mixer_set_background_color(struct sti_mixer *mixer,
+ u8 red, u8 green, u8 blue)
+{
+ u32 val = (red << 16) | (green << 8) | blue;
+
+ sti_mixer_reg_write(mixer, GAM_MIXER_BKC, val);
+}
+
+static void sti_mixer_set_background_area(struct sti_mixer *mixer,
+ struct drm_display_mode *mode)
+{
+ u32 ydo, xdo, yds, xds;
+
+ ydo = sti_vtg_get_line_number(*mode, 0);
+ yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, 0);
+ xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+
+ sti_mixer_reg_write(mixer, GAM_MIXER_BCO, ydo << 16 | xdo);
+ sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
+}
+
+int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
+{
+ int layer_id = 0, depth = layer->zorder;
+ u32 mask, val;
+
+ if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
+ return 1;
+
+ switch (layer->desc) {
+ case STI_GDP_0:
+ layer_id = GAM_DEPTH_GDP0_ID;
+ break;
+ case STI_GDP_1:
+ layer_id = GAM_DEPTH_GDP1_ID;
+ break;
+ case STI_GDP_2:
+ layer_id = GAM_DEPTH_GDP2_ID;
+ break;
+ case STI_GDP_3:
+ layer_id = GAM_DEPTH_GDP3_ID;
+ break;
+ case STI_VID_0:
+ layer_id = GAM_DEPTH_VID0_ID;
+ break;
+ case STI_VID_1:
+ layer_id = GAM_DEPTH_VID1_ID;
+ break;
+ default:
+ DRM_ERROR("Unknown layer %d\n", layer->desc);
+ return 1;
+ }
+ mask = GAM_DEPTH_MASK_ID << (3 * depth);
+ layer_id = layer_id << (3 * depth);
+
+ DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
+ sti_layer_to_str(layer), depth);
+ dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
+ layer_id, mask);
+
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
+ val &= ~mask;
+ val |= layer_id;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
+
+ dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
+ sti_mixer_reg_read(mixer, GAM_MIXER_CRB));
+ return 0;
+}
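
To make the cross-bar packing above concrete, here is a short worked example; the chosen layer and depth are arbitrary assumptions, the arithmetic follows directly from the code:

/*
 * Assumed case: layer STI_GDP_0 (GAM_DEPTH_GDP0_ID = 3) with zorder 2.
 *   mask     = GAM_DEPTH_MASK_ID << (3 * 2) = 7 << 6 = 0x1C0
 *   layer_id = GAM_DEPTH_GDP0_ID << (3 * 2) = 3 << 6 = 0x0C0
 * so GAM_MIXER_CRB becomes (CRB & ~0x1C0) | 0x0C0.
 */
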
+
+int sti_mixer_active_video_area(struct sti_mixer *mixer,
+ struct drm_display_mode *mode)
+{
+ u32 ydo, xdo, yds, xds;
+
+ ydo = sti_vtg_get_line_number(*mode, 0);
+ yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, 0);
+ xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+
+ DRM_DEBUG_DRIVER("%s active video area xdo:%d ydo:%d xds:%d yds:%d\n",
+ sti_mixer_to_str(mixer), xdo, ydo, xds, yds);
+ sti_mixer_reg_write(mixer, GAM_MIXER_AVO, ydo << 16 | xdo);
+ sti_mixer_reg_write(mixer, GAM_MIXER_AVS, yds << 16 | xds);
+
+ sti_mixer_set_background_color(mixer, 0xFF, 0, 0);
+
+ sti_mixer_set_background_area(mixer, mode);
+ sti_mixer_set_background_status(mixer, true);
+ return 0;
+}
+
+static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
+{
+ switch (layer->desc) {
+ case STI_BACK:
+ return GAM_CTL_BACK_MASK;
+ case STI_GDP_0:
+ return GAM_CTL_GDP0_MASK;
+ case STI_GDP_1:
+ return GAM_CTL_GDP1_MASK;
+ case STI_GDP_2:
+ return GAM_CTL_GDP2_MASK;
+ case STI_GDP_3:
+ return GAM_CTL_GDP3_MASK;
+ case STI_VID_0:
+ return GAM_CTL_VID0_MASK;
+ case STI_VID_1:
+ return GAM_CTL_VID1_MASK;
+ default:
+ return 0;
+ }
+}
+
+int sti_mixer_set_layer_status(struct sti_mixer *mixer,
+ struct sti_layer *layer, bool status)
+{
+ u32 mask, val;
+
+ DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
+ sti_mixer_to_str(mixer), sti_layer_to_str(layer));
+
+ mask = sti_mixer_get_layer_mask(layer);
+ if (!mask) {
+ DRM_ERROR("Can not find layer mask\n");
+ return -EINVAL;
+ }
+
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
+ val &= ~mask;
+ val |= status ? mask : 0;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
+
+ return 0;
+}
+
+void sti_mixer_set_matrix(struct sti_mixer *mixer)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mixerColorSpaceMatIdentity); i++)
+ sti_mixer_reg_write(mixer, GAM_MIXER_MX0 + (i * 4),
+ mixerColorSpaceMatIdentity[i]);
+}
+
+struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+ void __iomem *baseaddr)
+{
+ struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
+ struct device_node *np = dev->of_node;
+
+ dev_dbg(dev, "%s\n", __func__);
+ if (!mixer) {
+ DRM_ERROR("Failed to allocated memory for mixer\n");
+ return NULL;
+ }
+ mixer->regs = baseaddr;
+ mixer->dev = dev;
+ mixer->id = id;
+
+ if (of_device_is_compatible(np, "st,stih416-compositor"))
+ sti_mixer_set_matrix(mixer);
+
+ DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
+ sti_mixer_to_str(mixer), mixer->regs);
+
+ return mixer;
+}
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
new file mode 100644
index 000000000000..874372102e52
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_MIXER_H_
+#define _STI_MIXER_H_
+
+#include <drm/drmP.h>
+
+#include "sti_layer.h"
+
+#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
+
+/**
+ * STI Mixer subdevice structure
+ *
+ * @dev: driver device
+ * @regs: mixer registers
+ * @id: id of the mixer
+ * @drm_crtc: crtc object linked to the mixer
+ * @pending_event: set if a flip event is pending on crtc
+ */
+struct sti_mixer {
+ struct device *dev;
+ void __iomem *regs;
+ int id;
+ struct drm_crtc drm_crtc;
+ struct drm_pending_vblank_event *pending_event;
+};
+
+const char *sti_mixer_to_str(struct sti_mixer *mixer);
+
+struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+ void __iomem *baseaddr);
+
+int sti_mixer_set_layer_status(struct sti_mixer *mixer,
+ struct sti_layer *layer, bool status);
+int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
+int sti_mixer_active_video_area(struct sti_mixer *mixer,
+ struct drm_display_mode *mode);
+
+void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
+
+/* depth in Cross-bar control = z order */
+#define GAM_MIXER_NB_DEPTH_LEVEL 7
+
+#define STI_MIXER_MAIN 0
+#define STI_MIXER_AUX 1
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
new file mode 100644
index 000000000000..b69e26fee76e
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Vincent Abriou <vincent.abriou@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+/* glue registers */
+#define TVO_CSC_MAIN_M0 0x000
+#define TVO_CSC_MAIN_M1 0x004
+#define TVO_CSC_MAIN_M2 0x008
+#define TVO_CSC_MAIN_M3 0x00c
+#define TVO_CSC_MAIN_M4 0x010
+#define TVO_CSC_MAIN_M5 0x014
+#define TVO_CSC_MAIN_M6 0x018
+#define TVO_CSC_MAIN_M7 0x01c
+#define TVO_MAIN_IN_VID_FORMAT 0x030
+#define TVO_CSC_AUX_M0 0x100
+#define TVO_CSC_AUX_M1 0x104
+#define TVO_CSC_AUX_M2 0x108
+#define TVO_CSC_AUX_M3 0x10c
+#define TVO_CSC_AUX_M4 0x110
+#define TVO_CSC_AUX_M5 0x114
+#define TVO_CSC_AUX_M6 0x118
+#define TVO_CSC_AUX_M7 0x11c
+#define TVO_AUX_IN_VID_FORMAT 0x130
+#define TVO_VIP_HDF 0x400
+#define TVO_HD_SYNC_SEL 0x418
+#define TVO_HD_DAC_CFG_OFF 0x420
+#define TVO_VIP_HDMI 0x500
+#define TVO_HDMI_FORCE_COLOR_0 0x504
+#define TVO_HDMI_FORCE_COLOR_1 0x508
+#define TVO_HDMI_CLIP_VALUE_B_CB 0x50c
+#define TVO_HDMI_CLIP_VALUE_Y_G 0x510
+#define TVO_HDMI_CLIP_VALUE_R_CR 0x514
+#define TVO_HDMI_SYNC_SEL 0x518
+#define TVO_HDMI_DFV_OBS 0x540
+
+#define TVO_IN_FMT_SIGNED BIT(0)
+#define TVO_SYNC_EXT BIT(4)
+
+#define TVO_VIP_REORDER_R_SHIFT 24
+#define TVO_VIP_REORDER_G_SHIFT 20
+#define TVO_VIP_REORDER_B_SHIFT 16
+#define TVO_VIP_REORDER_MASK 0x3
+#define TVO_VIP_REORDER_Y_G_SEL 0
+#define TVO_VIP_REORDER_CB_B_SEL 1
+#define TVO_VIP_REORDER_CR_R_SEL 2
+
+#define TVO_VIP_CLIP_SHIFT 8
+#define TVO_VIP_CLIP_MASK 0x7
+#define TVO_VIP_CLIP_DISABLED 0
+#define TVO_VIP_CLIP_EAV_SAV 1
+#define TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y 2
+#define TVO_VIP_CLIP_LIMITED_RANGE_CB_CR 3
+#define TVO_VIP_CLIP_PROG_RANGE 4
+
+#define TVO_VIP_RND_SHIFT 4
+#define TVO_VIP_RND_MASK 0x3
+#define TVO_VIP_RND_8BIT_ROUNDED 0
+#define TVO_VIP_RND_10BIT_ROUNDED 1
+#define TVO_VIP_RND_12BIT_ROUNDED 2
+
+#define TVO_VIP_SEL_INPUT_MASK 0xf
+#define TVO_VIP_SEL_INPUT_MAIN 0x0
+#define TVO_VIP_SEL_INPUT_AUX 0x8
+#define TVO_VIP_SEL_INPUT_FORCE_COLOR 0xf
+#define TVO_VIP_SEL_INPUT_BYPASS_MASK 0x1
+#define TVO_VIP_SEL_INPUT_BYPASSED 1
+
+#define TVO_SYNC_MAIN_VTG_SET_REF 0x00
+#define TVO_SYNC_MAIN_VTG_SET_1 0x01
+#define TVO_SYNC_MAIN_VTG_SET_2 0x02
+#define TVO_SYNC_MAIN_VTG_SET_3 0x03
+#define TVO_SYNC_MAIN_VTG_SET_4 0x04
+#define TVO_SYNC_MAIN_VTG_SET_5 0x05
+#define TVO_SYNC_MAIN_VTG_SET_6 0x06
+#define TVO_SYNC_AUX_VTG_SET_REF 0x10
+#define TVO_SYNC_AUX_VTG_SET_1 0x11
+#define TVO_SYNC_AUX_VTG_SET_2 0x12
+#define TVO_SYNC_AUX_VTG_SET_3 0x13
+#define TVO_SYNC_AUX_VTG_SET_4 0x14
+#define TVO_SYNC_AUX_VTG_SET_5 0x15
+#define TVO_SYNC_AUX_VTG_SET_6 0x16
+
+#define TVO_SYNC_HD_DCS_SHIFT 8
+
+#define ENCODER_MAIN_CRTC_MASK BIT(0)
+
+/* enum listing the supported output data format */
+enum sti_tvout_video_out_type {
+ STI_TVOUT_VIDEO_OUT_RGB,
+ STI_TVOUT_VIDEO_OUT_YUV,
+};
+
+struct sti_tvout {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ void __iomem *regs;
+ struct reset_control *reset;
+ struct drm_encoder *hdmi;
+ struct drm_encoder *hda;
+};
+
+struct sti_tvout_encoder {
+ struct drm_encoder encoder;
+ struct sti_tvout *tvout;
+};
+
+#define to_sti_tvout_encoder(x) \
+ container_of(x, struct sti_tvout_encoder, encoder)
+
+#define to_sti_tvout(x) to_sti_tvout_encoder(x)->tvout
+
+/* preformatter conversion matrix */
+static const u32 rgb_to_ycbcr_601[8] = {
+ 0xF927082E, 0x04C9FEAB, 0x01D30964, 0xFA95FD3D,
+ 0x0000082E, 0x00002000, 0x00002000, 0x00000000
+};
+
+/* 709 RGB to YCbCr */
+static const u32 rgb_to_ycbcr_709[8] = {
+ 0xF891082F, 0x0367FF40, 0x01280B71, 0xF9B1FE20,
+ 0x0000082F, 0x00002000, 0x00002000, 0x00000000
+};
+
+static u32 tvout_read(struct sti_tvout *tvout, int offset)
+{
+ return readl(tvout->regs + offset);
+}
+
+static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
+{
+ writel(val, tvout->regs + offset);
+}
+
+/**
+ * Set the color reordering of a VIP
+ *
+ * @tvout: tvout structure
+ * @reg: VIP control register to update (TVO_VIP_HDMI or TVO_VIP_HDF)
+ * @cr_r: component to route to the Cr/R output channel
+ * @y_g: component to route to the Y/G output channel
+ * @cb_b: component to route to the Cb/B output channel
+ */
+static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
+ u32 cr_r, u32 y_g, u32 cb_b)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT);
+ val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT);
+ val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT);
+ val |= cr_r << TVO_VIP_REORDER_R_SHIFT;
+ val |= y_g << TVO_VIP_REORDER_G_SHIFT;
+ val |= cb_b << TVO_VIP_REORDER_B_SHIFT;
+
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Set the clipping mode of a VIP
+ *
+ * @tvout: tvout structure
+ * @reg: VIP control register to update (TVO_VIP_HDMI or TVO_VIP_HDF)
+ * @range: clipping range
+ */
+static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg,
+ u32 range)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT);
+ val |= range << TVO_VIP_CLIP_SHIFT;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Set the rounding mode of a VIP
+ *
+ * @tvout: tvout structure
+ * @reg: VIP control register to update (TVO_VIP_HDMI or TVO_VIP_HDF)
+ * @rnd: rounding mode per component
+ */
+static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT);
+ val |= rnd << TVO_VIP_RND_SHIFT;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Select the VIP input
+ *
+ * @tvout: tvout structure
+ * @reg: VIP control register to update (TVO_VIP_HDMI or TVO_VIP_HDF)
+ * @main_path: true if the main path feeds this VIP, false for the aux path
+ * @sel_input_logic_inverted: true when the bypass logic is inverted (stih407)
+ * @video_out: selected output data format (RGB or YUV)
+ */
+static void tvout_vip_set_sel_input(struct sti_tvout *tvout, int reg,
+ bool main_path,
+ bool sel_input_logic_inverted,
+ enum sti_tvout_video_out_type video_out)
+{
+ u32 sel_input;
+ u32 val = tvout_read(tvout, reg);
+
+ if (main_path)
+ sel_input = TVO_VIP_SEL_INPUT_MAIN;
+ else
+ sel_input = TVO_VIP_SEL_INPUT_AUX;
+
+ switch (video_out) {
+ case STI_TVOUT_VIDEO_OUT_RGB:
+ sel_input |= TVO_VIP_SEL_INPUT_BYPASSED;
+ break;
+ case STI_TVOUT_VIDEO_OUT_YUV:
+ sel_input &= ~TVO_VIP_SEL_INPUT_BYPASSED;
+ break;
+ }
+
+ /* on stih407 chip the sel_input bypass mode logic is inverted */
+ if (sel_input_logic_inverted)
+ sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
+
+ val &= ~TVO_VIP_SEL_INPUT_MASK;
+ val |= sel_input;
+ tvout_write(tvout, val, reg);
+}
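
A worked example of the bypass inversion handled above; the path and output format are assumptions picked for illustration:

/*
 * Assumed case: main path, RGB output.
 *   sel_input = TVO_VIP_SEL_INPUT_MAIN | TVO_VIP_SEL_INPUT_BYPASSED = 0x1
 * On stih407 (sel_input_logic_inverted) the XOR with
 * TVO_VIP_SEL_INPUT_BYPASS_MASK clears bit 0, so 0x0 is programmed;
 * on stih416 the value stays 0x1.
 */
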
+
+/**
+ * Select signed or unsigned input video format
+ *
+ * @tvout: tvout structure
+ * @reg: input video format register (TVO_MAIN_IN_VID_FORMAT or
+ * TVO_AUX_IN_VID_FORMAT)
+ * @in_vid_fmt: signed or unsigned input video format
+ */
+static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, int reg,
+ u32 in_vid_fmt)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~TVO_IN_FMT_SIGNED;
+ val |= in_vid_fmt;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Start VIP block for HDMI output
+ *
+ * @tvout: pointer on tvout structure
+ * @main_path: true if main path has to be used in the vip configuration
+ * else aux path is used.
+ */
+static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
+{
+ struct device_node *node = tvout->dev->of_node;
+ bool sel_input_logic_inverted = false;
+
+ dev_dbg(tvout->dev, "%s\n", __func__);
+
+ if (main_path) {
+ DRM_DEBUG_DRIVER("main vip for hdmi\n");
+ /* select the input sync for hdmi = VTG set 1 */
+ tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ } else {
+ DRM_DEBUG_DRIVER("aux vip for hdmi\n");
+ /* select the input sync for hdmi = VTG set 1 */
+ tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ }
+
+ /* set color channel order */
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDMI,
+ TVO_VIP_REORDER_CR_R_SEL,
+ TVO_VIP_REORDER_Y_G_SEL,
+ TVO_VIP_REORDER_CB_B_SEL);
+
+ /* set clipping mode (Limited range RGB/Y) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
+ TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+
+ /* set round mode (rounded to 8-bit per component) */
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
+
+ if (of_device_is_compatible(node, "st,stih407-tvout")) {
+ /* set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, TVO_MAIN_IN_VID_FORMAT,
+ TVO_IN_FMT_SIGNED);
+ sel_input_logic_inverted = true;
+ }
+
+ /* input selection */
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
+ sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
+}
+
+/**
+ * Start HDF VIP and HD DAC
+ *
+ * @tvout: pointer on tvout structure
+ * @main_path: true if main path has to be used in the vip configuration
+ * else aux path is used.
+ */
+static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
+{
+ struct device_node *node = tvout->dev->of_node;
+ bool sel_input_logic_inverted = false;
+
+ dev_dbg(tvout->dev, "%s\n", __func__);
+
+ if (!main_path) {
+ DRM_ERROR("HD Analog on aux not implemented\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("main vip for HDF\n");
+
+ /* set color channel order */
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDF,
+ TVO_VIP_REORDER_CR_R_SEL,
+ TVO_VIP_REORDER_Y_G_SEL,
+ TVO_VIP_REORDER_CB_B_SEL);
+
+ /* set clipping mode (Limited range Cb/Cr) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF,
+ TVO_VIP_CLIP_LIMITED_RANGE_CB_CR);
+
+ /* set round mode (rounded to 10-bit per component) */
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
+
+ if (of_device_is_compatible(node, "st,stih407-tvout")) {
+ /* set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, TVO_MAIN_IN_VID_FORMAT,
+ TVO_IN_FMT_SIGNED);
+ sel_input_logic_inverted = true;
+ }
+
+ /* Input selection */
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDF,
+ main_path,
+ sel_input_logic_inverted,
+ STI_TVOUT_VIDEO_OUT_YUV);
+
+ /* select the input sync for HD analog = VTG set 3
+ * and HD DCS = VTG set 2 */
+ tvout_write(tvout,
+ (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT)
+ | TVO_SYNC_MAIN_VTG_SET_3,
+ TVO_HD_SYNC_SEL);
+
+ /* power up HD DAC */
+ tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
+}
+
+static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool sti_tvout_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void sti_tvout_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct sti_tvout_encoder *sti_encoder = to_sti_tvout_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(sti_encoder);
+}
+
+static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
+ .destroy = sti_tvout_encoder_destroy,
+};
+
+static void sti_hda_encoder_commit(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ tvout_hda_start(tvout, true);
+}
+
+static void sti_hda_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ /* reset VIP register */
+ tvout_write(tvout, 0x0, TVO_VIP_HDF);
+
+ /* power down HD DAC */
+ tvout_write(tvout, 1, TVO_HD_DAC_CFG_OFF);
+}
+
+static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = {
+ .dpms = sti_tvout_encoder_dpms,
+ .mode_fixup = sti_tvout_encoder_mode_fixup,
+ .mode_set = sti_tvout_encoder_mode_set,
+ .prepare = sti_tvout_encoder_prepare,
+ .commit = sti_hda_encoder_commit,
+ .disable = sti_hda_encoder_disable,
+};
+
+static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ struct sti_tvout_encoder *encoder;
+ struct drm_encoder *drm_encoder;
+
+ encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return NULL;
+
+ encoder->tvout = tvout;
+
+ drm_encoder = (struct drm_encoder *) encoder;
+
+ drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_clones = 1 << 0;
+
+ drm_encoder_init(dev, drm_encoder,
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC);
+
+ drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs);
+
+ return drm_encoder;
+}
+
+static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ tvout_hdmi_start(tvout, true);
+}
+
+static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ /* reset VIP register */
+ tvout_write(tvout, 0x0, TVO_VIP_HDMI);
+}
+
+static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = {
+ .dpms = sti_tvout_encoder_dpms,
+ .mode_fixup = sti_tvout_encoder_mode_fixup,
+ .mode_set = sti_tvout_encoder_mode_set,
+ .prepare = sti_tvout_encoder_prepare,
+ .commit = sti_hdmi_encoder_commit,
+ .disable = sti_hdmi_encoder_disable,
+};
+
+static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ struct sti_tvout_encoder *encoder;
+ struct drm_encoder *drm_encoder;
+
+ encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return NULL;
+
+ encoder->tvout = tvout;
+
+ drm_encoder = (struct drm_encoder *) encoder;
+
+ drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_clones = 1 << 1;
+
+ drm_encoder_init(dev, drm_encoder,
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs);
+
+ return drm_encoder;
+}
+
+static void sti_tvout_create_encoders(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ tvout->hdmi = sti_tvout_create_hdmi_encoder(dev, tvout);
+ tvout->hda = sti_tvout_create_hda_encoder(dev, tvout);
+}
+
+static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
+{
+ if (tvout->hdmi)
+ drm_encoder_cleanup(tvout->hdmi);
+ tvout->hdmi = NULL;
+
+ if (tvout->hda)
+ drm_encoder_cleanup(tvout->hda);
+ tvout->hda = NULL;
+}
+
+static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_tvout *tvout = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ unsigned int i;
+ int ret;
+
+ tvout->drm_dev = drm_dev;
+
+ /* set preformatter matrix */
+ for (i = 0; i < 8; i++) {
+ tvout_write(tvout, rgb_to_ycbcr_601[i],
+ TVO_CSC_MAIN_M0 + (i * 4));
+ tvout_write(tvout, rgb_to_ycbcr_601[i],
+ TVO_CSC_AUX_M0 + (i * 4));
+ }
+
+ sti_tvout_create_encoders(drm_dev, tvout);
+
+ ret = component_bind_all(dev, drm_dev);
+ if (ret)
+ sti_tvout_destroy_encoders(tvout);
+
+ return ret;
+}
+
+static void sti_tvout_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_tvout_ops = {
+ .bind = sti_tvout_bind,
+ .unbind = sti_tvout_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int sti_tvout_master_bind(struct device *dev)
+{
+ return 0;
+}
+
+static void sti_tvout_master_unbind(struct device *dev)
+{
+ /* do nothing */
+}
+
+static const struct component_master_ops sti_tvout_master_ops = {
+ .bind = sti_tvout_master_bind,
+ .unbind = sti_tvout_master_unbind,
+};
+
+static int sti_tvout_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct sti_tvout *tvout;
+ struct resource *res;
+ struct device_node *child_np;
+ struct component_match *match = NULL;
+
+ DRM_INFO("%s\n", __func__);
+
+ if (!node)
+ return -ENODEV;
+
+ tvout = devm_kzalloc(dev, sizeof(*tvout), GFP_KERNEL);
+ if (!tvout)
+ return -ENOMEM;
+
+ tvout->dev = dev;
+
+ /* get memory resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
+ if (!res) {
+ DRM_ERROR("Invalid glue resource\n");
+ return -ENOMEM;
+ }
+ tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!tvout->regs)
+ return -ENOMEM;
+
+ /* get reset resources */
+ tvout->reset = devm_reset_control_get(dev, "tvout");
+ /* take tvout out of reset */
+ if (!IS_ERR(tvout->reset))
+ reset_control_deassert(tvout->reset);
+
+ platform_set_drvdata(pdev, tvout);
+
+ of_platform_populate(node, NULL, NULL, dev);
+
+ child_np = of_get_next_available_child(node, NULL);
+
+ while (child_np) {
+ component_match_add(dev, &match, compare_of, child_np);
+ of_node_put(child_np);
+ child_np = of_get_next_available_child(node, child_np);
+ }
+
+ component_master_add_with_match(dev, &sti_tvout_master_ops, match);
+
+ return component_add(dev, &sti_tvout_ops);
+}
+
+static int sti_tvout_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &sti_tvout_master_ops);
+ component_del(&pdev->dev, &sti_tvout_ops);
+ return 0;
+}
+
+static const struct of_device_id tvout_of_match[] = {
+ { .compatible = "st,stih416-tvout", },
+ { .compatible = "st,stih407-tvout", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, tvout_of_match);
+
+struct platform_driver sti_tvout_driver = {
+ .driver = {
+ .name = "sti-tvout",
+ .owner = THIS_MODULE,
+ .of_match_table = tvout_of_match,
+ },
+ .probe = sti_tvout_probe,
+ .remove = sti_tvout_remove,
+};
+
+module_platform_driver(sti_tvout_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
new file mode 100644
index 000000000000..10ced6a479f4
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+
+#include "sti_layer.h"
+#include "sti_vid.h"
+#include "sti_vtg.h"
+
+/* Registers */
+#define VID_CTL 0x00
+#define VID_ALP 0x04
+#define VID_CLF 0x08
+#define VID_VPO 0x0C
+#define VID_VPS 0x10
+#define VID_KEY1 0x28
+#define VID_KEY2 0x2C
+#define VID_MPR0 0x30
+#define VID_MPR1 0x34
+#define VID_MPR2 0x38
+#define VID_MPR3 0x3C
+#define VID_MST 0x68
+#define VID_BC 0x70
+#define VID_TINT 0x74
+#define VID_CSAT 0x78
+
+/* Registers values */
+#define VID_CTL_IGNORE (BIT(31) | BIT(30))
+#define VID_CTL_PSI_ENABLE (BIT(2) | BIT(1) | BIT(0))
+#define VID_ALP_OPAQUE 0x00000080
+#define VID_BC_DFLT 0x00008000
+#define VID_TINT_DFLT 0x00000000
+#define VID_CSAT_DFLT 0x00000080
+/* YCbCr to RGB BT709:
+ * R = Y+1.5391Cr
+ * G = Y-0.4590Cr-0.1826Cb
+ * B = Y+1.8125Cb */
+#define VID_MPR0_BT709 0x0A800000
+#define VID_MPR1_BT709 0x0AC50000
+#define VID_MPR2_BT709 0x07150545
+#define VID_MPR3_BT709 0x00000AE8
+
+static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
+{
+ u32 val;
+
+ /* Unmask */
+ val = readl(vid->regs + VID_CTL);
+ val &= ~VID_CTL_IGNORE;
+ writel(val, vid->regs + VID_CTL);
+
+ return 0;
+}
+
+static int sti_vid_commit_layer(struct sti_layer *vid)
+{
+ struct drm_display_mode *mode = vid->mode;
+ u32 ydo, xdo, yds, xds;
+
+ ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
+ yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
+
+ writel((ydo << 16) | xdo, vid->regs + VID_VPO);
+ writel((yds << 16) | xds, vid->regs + VID_VPS);
+
+ return 0;
+}
+
+static int sti_vid_disable_layer(struct sti_layer *vid)
+{
+ u32 val;
+
+ /* Mask */
+ val = readl(vid->regs + VID_CTL);
+ val |= VID_CTL_IGNORE;
+ writel(val, vid->regs + VID_CTL);
+
+ return 0;
+}
+
+static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
+{
+ return NULL;
+}
+
+static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
+{
+ return 0;
+}
+
+static void sti_vid_init(struct sti_layer *vid)
+{
+ /* Enable PSI, Mask layer */
+ writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
+
+ /* Opaque */
+ writel(VID_ALP_OPAQUE, vid->regs + VID_ALP);
+
+ /* Color conversion parameters */
+ writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
+ writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
+ writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
+ writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
+
+ /* Brightness, contrast, tint, saturation */
+ writel(VID_BC_DFLT, vid->regs + VID_BC);
+ writel(VID_TINT_DFLT, vid->regs + VID_TINT);
+ writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
+}
+
+static const struct sti_layer_funcs vid_ops = {
+ .get_formats = sti_vid_get_formats,
+ .get_nb_formats = sti_vid_get_nb_formats,
+ .init = sti_vid_init,
+ .prepare = sti_vid_prepare_layer,
+ .commit = sti_vid_commit_layer,
+ .disable = sti_vid_disable_layer,
+};
+
+struct sti_layer *sti_vid_create(struct device *dev)
+{
+ struct sti_layer *vid;
+
+ vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
+ if (!vid) {
+ DRM_ERROR("Failed to allocate memory for VID\n");
+ return NULL;
+ }
+
+ vid->ops = &vid_ops;
+
+ return vid;
+}
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
new file mode 100644
index 000000000000..2c0aecd63294
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_VID_H_
+#define _STI_VID_H_
+
+struct sti_layer *sti_vid_create(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c
new file mode 100644
index 000000000000..82a51d488434
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtac.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+
+/* registers offset */
+#define VTAC_CONFIG 0x00
+#define VTAC_RX_FIFO_CONFIG 0x04
+#define VTAC_FIFO_CONFIG_VAL 0x04
+
+#define VTAC_SYS_CFG8521 0x824
+#define VTAC_SYS_CFG8522 0x828
+
+/* Number of phyts per pixel */
+#define VTAC_2_5_PPP 0x0005
+#define VTAC_3_PPP 0x0006
+#define VTAC_4_PPP 0x0008
+#define VTAC_5_PPP 0x000A
+#define VTAC_6_PPP 0x000C
+#define VTAC_13_PPP 0x001A
+#define VTAC_14_PPP 0x001C
+#define VTAC_15_PPP 0x001E
+#define VTAC_16_PPP 0x0020
+#define VTAC_17_PPP 0x0022
+#define VTAC_18_PPP 0x0024
+
+/* enable bits */
+#define VTAC_ENABLE 0x3003
+
+#define VTAC_TX_PHY_ENABLE_CLK_PHY BIT(0)
+#define VTAC_TX_PHY_ENABLE_CLK_DLL BIT(1)
+#define VTAC_TX_PHY_PLL_NOT_OSC_MODE BIT(3)
+#define VTAC_TX_PHY_RST_N_DLL_SWITCH BIT(4)
+#define VTAC_TX_PHY_PROG_N3 BIT(9)
+
+
+/**
+ * VTAC mode structure
+ *
+ * @vid_in_width: Video Data Resolution
+ * @phyts_width: Width of phyt buses (phyt low and phyt high).
+ * @phyts_per_pixel: Number of phyts sent per pixel
+ */
+struct sti_vtac_mode {
+ u32 vid_in_width;
+ u32 phyts_width;
+ u32 phyts_per_pixel;
+};
+
+static const struct sti_vtac_mode vtac_mode_main = {0x2, 0x2, VTAC_5_PPP};
+static const struct sti_vtac_mode vtac_mode_aux = {0x1, 0x0, VTAC_17_PPP};
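
For reference, the configuration word that the set_config helpers below derive from vtac_mode_main works out as follows; this is plain arithmetic on the values above, no extra hardware knowledge is assumed:

/*
 * vtac_mode_main: vid_in_width = 0x2, phyts_width = 0x2, VTAC_5_PPP = 0xA
 *   config = VTAC_ENABLE          -> 0x00003003
 *          | (0x2 << 4)           -> 0x00000020
 *          | (0x2 << 16)          -> 0x00020000
 *          | (VTAC_5_PPP << 23)   -> 0x05000000
 *          = 0x05023023, written to VTAC_CONFIG
 */
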
+
+/**
+ * VTAC structure
+ *
+ * @dev: pointer to device structure
+ * @regs: ioremapped registers for RX and TX devices
+ * @phy_regs: phy registers for TX device
+ * @clk: clock
+ * @mode: main or auxiliary configuration mode
+ */
+struct sti_vtac {
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *phy_regs;
+ struct clk *clk;
+ const struct sti_vtac_mode *mode;
+};
+
+static void sti_vtac_rx_set_config(struct sti_vtac *vtac)
+{
+ u32 config;
+
+ /* Enable VTAC clock */
+ if (clk_prepare_enable(vtac->clk))
+ DRM_ERROR("Failed to prepare/enable vtac_rx clock.\n");
+
+ writel(VTAC_FIFO_CONFIG_VAL, vtac->regs + VTAC_RX_FIFO_CONFIG);
+
+ config = VTAC_ENABLE;
+ config |= vtac->mode->vid_in_width << 4;
+ config |= vtac->mode->phyts_width << 16;
+ config |= vtac->mode->phyts_per_pixel << 23;
+ writel(config, vtac->regs + VTAC_CONFIG);
+}
+
+static void sti_vtac_tx_set_config(struct sti_vtac *vtac)
+{
+ u32 phy_config;
+ u32 config;
+
+ /* Enable VTAC clock */
+ if (clk_prepare_enable(vtac->clk))
+ DRM_ERROR("Failed to prepare/enable vtac_tx clock.\n");
+
+ /* Configure vtac phy */
+ phy_config = 0x00000000;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8522);
+ phy_config = VTAC_TX_PHY_ENABLE_CLK_PHY;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_PROG_N3;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_ENABLE_CLK_DLL;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_RST_N_DLL_SWITCH;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_PLL_NOT_OSC_MODE;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+
+ /* Configure vtac tx */
+ config = VTAC_ENABLE;
+ config |= vtac->mode->vid_in_width << 4;
+ config |= vtac->mode->phyts_width << 16;
+ config |= vtac->mode->phyts_per_pixel << 23;
+ writel(config, vtac->regs + VTAC_CONFIG);
+}
+
+static const struct of_device_id vtac_of_match[] = {
+ {
+ .compatible = "st,vtac-main",
+ .data = &vtac_mode_main,
+ }, {
+ .compatible = "st,vtac-aux",
+ .data = &vtac_mode_aux,
+ }, {
+ /* end node */
+ }
+};
+MODULE_DEVICE_TABLE(of, vtac_of_match);
+
+static int sti_vtac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct of_device_id *id;
+ struct sti_vtac *vtac;
+ struct resource *res;
+
+ vtac = devm_kzalloc(dev, sizeof(*vtac), GFP_KERNEL);
+ if (!vtac)
+ return -ENOMEM;
+
+ vtac->dev = dev;
+
+ id = of_match_node(vtac_of_match, np);
+ if (!id)
+ return -ENOMEM;
+
+ vtac->mode = id->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("Invalid resource\n");
+ return -ENOMEM;
+ }
+ vtac->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vtac->regs))
+ return PTR_ERR(vtac->regs);
+
+
+ vtac->clk = devm_clk_get(dev, "vtac");
+ if (IS_ERR(vtac->clk)) {
+ DRM_ERROR("Cannot get vtac clock\n");
+ return PTR_ERR(vtac->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ vtac->phy_regs = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+ sti_vtac_tx_set_config(vtac);
+ } else {
+ sti_vtac_rx_set_config(vtac);
+ }
+
+ platform_set_drvdata(pdev, vtac);
+ DRM_INFO("%s %s\n", __func__, dev_name(vtac->dev));
+
+ return 0;
+}
+
+static int sti_vtac_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver sti_vtac_driver = {
+ .driver = {
+ .name = "sti-vtac",
+ .owner = THIS_MODULE,
+ .of_match_table = vtac_of_match,
+ },
+ .probe = sti_vtac_probe,
+ .remove = sti_vtac_remove,
+};
+
+module_platform_driver(sti_vtac_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
new file mode 100644
index 000000000000..740d6e347a62
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * Vincent Abriou <vincent.abriou@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+
+#include "sti_vtg.h"
+
+#define VTG_TYPE_MASTER 0
+#define VTG_TYPE_SLAVE_BY_EXT0 1
+
+/* registers offset */
+#define VTG_MODE 0x0000
+#define VTG_CLKLN 0x0008
+#define VTG_HLFLN 0x000C
+#define VTG_DRST_AUTOC 0x0010
+#define VTG_VID_TFO 0x0040
+#define VTG_VID_TFS 0x0044
+#define VTG_VID_BFO 0x0048
+#define VTG_VID_BFS 0x004C
+
+#define VTG_HOST_ITS 0x0078
+#define VTG_HOST_ITS_BCLR 0x007C
+#define VTG_HOST_ITM_BCLR 0x0088
+#define VTG_HOST_ITM_BSET 0x008C
+
+#define VTG_H_HD_1 0x00C0
+#define VTG_TOP_V_VD_1 0x00C4
+#define VTG_BOT_V_VD_1 0x00C8
+#define VTG_TOP_V_HD_1 0x00CC
+#define VTG_BOT_V_HD_1 0x00D0
+
+#define VTG_H_HD_2 0x00E0
+#define VTG_TOP_V_VD_2 0x00E4
+#define VTG_BOT_V_VD_2 0x00E8
+#define VTG_TOP_V_HD_2 0x00EC
+#define VTG_BOT_V_HD_2 0x00F0
+
+#define VTG_H_HD_3 0x0100
+#define VTG_TOP_V_VD_3 0x0104
+#define VTG_BOT_V_VD_3 0x0108
+#define VTG_TOP_V_HD_3 0x010C
+#define VTG_BOT_V_HD_3 0x0110
+
+#define VTG_IRQ_BOTTOM BIT(0)
+#define VTG_IRQ_TOP BIT(1)
+#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM)
+
+/* delay introduced by the Arbitrary Waveform Generator in number of pixels */
+#define AWG_DELAY_HD (-9)
+#define AWG_DELAY_ED (-8)
+#define AWG_DELAY_SD (-7)
+
+LIST_HEAD(vtg_lookup);
+
+/**
+ * STI VTG structure
+ *
+ * @dev: pointer to the device
+ * @np: device node of this VTG instance
+ * @regs: VTG registers
+ * @irq: VTG irq
+ * @irq_status: interrupt status latched in the hard irq, used by the thread
+ * @notifier_list: notifier callbacks
+ * @crtc_id: the crtc id for vblank event
+ * @slave: slave vtg
+ * @link: list node to link the structure in the lookup list
+ */
+struct sti_vtg {
+ struct device *dev;
+ struct device_node *np;
+ void __iomem *regs;
+ int irq;
+ u32 irq_status;
+ struct raw_notifier_head notifier_list;
+ int crtc_id;
+ struct sti_vtg *slave;
+ struct list_head link;
+};
+
+static void vtg_register(struct sti_vtg *vtg)
+{
+ list_add_tail(&vtg->link, &vtg_lookup);
+}
+
+struct sti_vtg *of_vtg_find(struct device_node *np)
+{
+ struct sti_vtg *vtg;
+
+ list_for_each_entry(vtg, &vtg_lookup, link) {
+ if (vtg->np == np)
+ return vtg;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_vtg_find);
+
+static void vtg_reset(struct sti_vtg *vtg)
+{
+ /* reset slave and then master */
+ if (vtg->slave)
+ vtg_reset(vtg->slave);
+
+ writel(1, vtg->regs + VTG_DRST_AUTOC);
+}
+
+static void vtg_set_mode(struct sti_vtg *vtg,
+ int type, const struct drm_display_mode *mode)
+{
+ u32 tmp;
+
+ if (vtg->slave)
+ vtg_set_mode(vtg->slave, VTG_TYPE_SLAVE_BY_EXT0, mode);
+
+ writel(mode->htotal, vtg->regs + VTG_CLKLN);
+ writel(mode->vtotal * 2, vtg->regs + VTG_HLFLN);
+
+ tmp = (mode->vtotal - mode->vsync_start + 1) << 16;
+ tmp |= mode->htotal - mode->hsync_start;
+ writel(tmp, vtg->regs + VTG_VID_TFO);
+ writel(tmp, vtg->regs + VTG_VID_BFO);
+
+ tmp = (mode->vdisplay + mode->vtotal - mode->vsync_start + 1) << 16;
+ tmp |= mode->hdisplay + mode->htotal - mode->hsync_start;
+ writel(tmp, vtg->regs + VTG_VID_TFS);
+ writel(tmp, vtg->regs + VTG_VID_BFS);
+
+ /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */
+ tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ writel(tmp, vtg->regs + VTG_H_HD_1);
+ writel(tmp, vtg->regs + VTG_H_HD_2);
+
+ tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
+ tmp |= 1;
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_1);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_1);
+ writel(0, vtg->regs + VTG_TOP_V_HD_1);
+ writel(0, vtg->regs + VTG_BOT_V_HD_1);
+
+ /* prepare VTG set 2 for HD DCS */
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
+ writel(0, vtg->regs + VTG_TOP_V_HD_2);
+ writel(0, vtg->regs + VTG_BOT_V_HD_2);
+
+ /* prepare VTG set 3 for HD Analog in HD mode */
+ tmp = (mode->hsync_end - mode->hsync_start + AWG_DELAY_HD) << 16;
+ tmp |= mode->htotal + AWG_DELAY_HD;
+ writel(tmp, vtg->regs + VTG_H_HD_3);
+
+ tmp = (mode->vsync_end - mode->vsync_start) << 16;
+ tmp |= mode->vtotal;
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_3);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_3);
+
+ tmp = (mode->htotal + AWG_DELAY_HD) << 16;
+ tmp |= mode->htotal + AWG_DELAY_HD;
+ writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
+ writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
+
+ /* mode */
+ writel(type, vtg->regs + VTG_MODE);
+}
+
+static void vtg_enable_irq(struct sti_vtg *vtg)
+{
+ /* clear interrupt status and mask */
+ writel(0xFFFF, vtg->regs + VTG_HOST_ITS_BCLR);
+ writel(0xFFFF, vtg->regs + VTG_HOST_ITM_BCLR);
+ writel(VTG_IRQ_MASK, vtg->regs + VTG_HOST_ITM_BSET);
+}
+
+void sti_vtg_set_config(struct sti_vtg *vtg,
+ const struct drm_display_mode *mode)
+{
+ /* write configuration */
+ vtg_set_mode(vtg, VTG_TYPE_MASTER, mode);
+
+ vtg_reset(vtg);
+
+ /* enable irq for the vtg vblank synchro */
+ if (vtg->slave)
+ vtg_enable_irq(vtg->slave);
+ else
+ vtg_enable_irq(vtg);
+}
+EXPORT_SYMBOL(sti_vtg_set_config);
+
+/**
+ * sti_vtg_get_line_number
+ *
+ * @mode: display mode to be used
+ * @y: line
+ *
+ * Return the line number according to the display mode taking
+ * into account the Sync and Back Porch information.
+ * Video frame line numbers start at 1, y starts at 0.
+ * In interlaced modes the start line is the field line number of the odd
+ * field, but y is still defined as a progressive frame.
+ */
+u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y)
+{
+ u32 start_line = mode.vtotal - mode.vsync_start + 1;
+
+ if (mode.flags & DRM_MODE_FLAG_INTERLACE)
+ start_line *= 2;
+
+ return start_line + y;
+}
+EXPORT_SYMBOL(sti_vtg_get_line_number);
+
+/**
+ * sti_vtg_get_pixel_number
+ *
+ * @mode: display mode to be used
+ * @x: row
+ *
+ * Return the pixel number according to the display mode taking
+ * into account the Sync and Back Porch information.
+ * Pixels are counted from 0.
+ */
+u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x)
+{
+ return mode.htotal - mode.hsync_start + x;
+}
+EXPORT_SYMBOL(sti_vtg_get_pixel_number);
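
As a quick sanity check of the two helpers above, using the standard CEA-861 1080p timing (htotal 2200, hsync_start 2008, vtotal 1125, vsync_start 1084; these timing values are assumed for illustration, they do not come from this patch):

/*
 * sti_vtg_get_line_number(mode, 0)  = 1125 - 1084 + 1 = 42
 * sti_vtg_get_pixel_number(mode, 0) = 2200 - 2008     = 192
 * i.e. the first active pixel of a progressive 1080p frame sits on
 * line 42, pixel 192, counted from the start of the sync pulses.
 */
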
+
+int sti_vtg_register_client(struct sti_vtg *vtg,
+ struct notifier_block *nb, int crtc_id)
+{
+ if (vtg->slave)
+ return sti_vtg_register_client(vtg->slave, nb, crtc_id);
+
+ vtg->crtc_id = crtc_id;
+ return raw_notifier_chain_register(&vtg->notifier_list, nb);
+}
+EXPORT_SYMBOL(sti_vtg_register_client);
+
+int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb)
+{
+ if (vtg->slave)
+ return sti_vtg_unregister_client(vtg->slave, nb);
+
+ return raw_notifier_chain_unregister(&vtg->notifier_list, nb);
+}
+EXPORT_SYMBOL(sti_vtg_unregister_client);
+
+static irqreturn_t vtg_irq_thread(int irq, void *arg)
+{
+ struct sti_vtg *vtg = arg;
+ u32 event;
+
+ event = (vtg->irq_status & VTG_IRQ_TOP) ?
+ VTG_TOP_FIELD_EVENT : VTG_BOTTOM_FIELD_EVENT;
+
+ raw_notifier_call_chain(&vtg->notifier_list, event, &vtg->crtc_id);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vtg_irq(int irq, void *arg)
+{
+ struct sti_vtg *vtg = arg;
+
+ vtg->irq_status = readl(vtg->regs + VTG_HOST_ITS);
+
+ writel(vtg->irq_status, vtg->regs + VTG_HOST_ITS_BCLR);
+
+ /* force sync bus write */
+ readl(vtg->regs + VTG_HOST_ITS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int vtg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct sti_vtg *vtg;
+ struct resource *res;
+ char irq_name[32];
+ int ret;
+
+ vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
+ if (!vtg)
+ return -ENOMEM;
+
+ vtg->dev = dev;
+ vtg->np = pdev->dev.of_node;
+
+ /* Get memory resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("Get memory resource failed\n");
+ return -ENOMEM;
+ }
+ vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+
+ np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
+ if (np) {
+ vtg->slave = of_vtg_find(np);
+
+ if (!vtg->slave)
+ return -EPROBE_DEFER;
+ } else {
+ vtg->irq = platform_get_irq(pdev, 0);
+ if (IS_ERR_VALUE(vtg->irq)) {
+ DRM_ERROR("Failed to get VTG interrupt\n");
+ return vtg->irq;
+ }
+
+ snprintf(irq_name, sizeof(irq_name), "vsync-%s",
+ dev_name(vtg->dev));
+
+ RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list);
+
+ ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
+ vtg_irq_thread, IRQF_ONESHOT, irq_name, vtg);
+ if (IS_ERR_VALUE(ret)) {
+ DRM_ERROR("Failed to register VTG interrupt\n");
+ return ret;
+ }
+ }
+
+ vtg_register(vtg);
+ platform_set_drvdata(pdev, vtg);
+
+ DRM_INFO("%s %s\n", __func__, dev_name(vtg->dev));
+
+ return 0;
+}
+
+static int vtg_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id vtg_of_match[] = {
+ { .compatible = "st,vtg", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vtg_of_match);
+
+struct platform_driver sti_vtg_driver = {
+ .driver = {
+ .name = "sti-vtg",
+ .owner = THIS_MODULE,
+ .of_match_table = vtg_of_match,
+ },
+ .probe = vtg_probe,
+ .remove = vtg_remove,
+};
+
+module_platform_driver(sti_vtg_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h
new file mode 100644
index 000000000000..e84d23f1f57f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtg.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_VTG_H_
+#define _STI_VTG_H_
+
+#define VTG_TOP_FIELD_EVENT 1
+#define VTG_BOTTOM_FIELD_EVENT 2
+
+struct sti_vtg;
+struct drm_display_mode;
+struct notifier_block;
+
+struct sti_vtg *of_vtg_find(struct device_node *np);
+void sti_vtg_set_config(struct sti_vtg *vtg,
+ const struct drm_display_mode *mode);
+int sti_vtg_register_client(struct sti_vtg *vtg,
+ struct notifier_block *nb, int crtc_id);
+int sti_vtg_unregister_client(struct sti_vtg *vtg,
+ struct notifier_block *nb);
+
+u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y);
+u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x);
+
+#endif
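The header above is the whole client-facing API: vblank notification is delivered through a raw notifier chain rather than a dedicated callback. A minimal sketch of a client, assuming it already holds a VTG pointer obtained with of_vtg_find(); the callback name and body are illustrative:

/* Illustrative VTG client: forward field events as vblanks. */
static int example_vtg_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	int crtc_id = *(int *)data;

	if (event == VTG_TOP_FIELD_EVENT || event == VTG_BOTTOM_FIELD_EVENT)
		pr_debug("vblank on crtc %d\n", crtc_id);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_vtg_event,
};

/* registration, typically done once from the CRTC code: */
/*	sti_vtg_register_client(vtg, &example_nb, crtc_id);	*/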
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index ef40381f3909..6553fd238685 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -18,6 +18,8 @@
struct tegra_dc_soc_info {
bool supports_interlacing;
bool supports_cursor;
+ bool supports_block_linear;
+ unsigned int pitch_align;
};
struct tegra_plane {
@@ -212,15 +214,44 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
- if (window->tiled) {
- value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
- DC_WIN_BUFFER_ADDR_MODE_TILE;
+ if (dc->soc->supports_block_linear) {
+ unsigned long height = window->tiling.value;
+
+ switch (window->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WINBUF_SURFACE_KIND_PITCH;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WINBUF_SURFACE_KIND_TILED;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+ DC_WINBUF_SURFACE_KIND_BLOCK;
+ break;
+ }
+
+ tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
} else {
- value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
- DC_WIN_BUFFER_ADDR_MODE_LINEAR;
- }
+ switch (window->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ break;
- tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ }
value = WIN_ENABLE;
@@ -288,6 +319,7 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct tegra_dc *dc = to_tegra_dc(crtc);
struct tegra_dc_window window;
unsigned int i;
+ int err;
memset(&window, 0, sizeof(window));
window.src.x = src_x >> 16;
@@ -301,7 +333,10 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
window.format = tegra_dc_format(fb->pixel_format, &window.swap);
window.bits_per_pixel = fb->bits_per_pixel;
window.bottom_up = tegra_fb_is_bottom_up(fb);
- window.tiled = tegra_fb_is_tiled(fb);
+
+ err = tegra_fb_get_tiling(fb, &window.tiling);
+ if (err < 0)
+ return err;
for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -402,8 +437,14 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
{
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
+ struct tegra_bo_tiling tiling;
unsigned int format, swap;
unsigned long value;
+ int err;
+
+ err = tegra_fb_get_tiling(fb, &tiling);
+ if (err < 0)
+ return err;
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -417,15 +458,44 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP);
- if (tegra_fb_is_tiled(fb)) {
- value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
- DC_WIN_BUFFER_ADDR_MODE_TILE;
+ if (dc->soc->supports_block_linear) {
+ unsigned long height = tiling.value;
+
+ switch (tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WINBUF_SURFACE_KIND_PITCH;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WINBUF_SURFACE_KIND_TILED;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+ DC_WINBUF_SURFACE_KIND_BLOCK;
+ break;
+ }
+
+ tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
} else {
- value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
- DC_WIN_BUFFER_ADDR_MODE_LINEAR;
- }
+ switch (tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
- tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ }
/* make sure bottom-up buffers are properly displayed */
if (tegra_fb_is_bottom_up(fb)) {
@@ -1214,12 +1284,20 @@ static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct tegra_drm *tegra = drm->dev_private;
int err;
drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+ /*
+ * Keep track of the minimum pitch alignment across all display
+ * controllers.
+ */
+ if (dc->soc->pitch_align > tegra->pitch_align)
+ tegra->pitch_align = dc->soc->pitch_align;
+
err = tegra_dc_rgb_init(drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
@@ -1277,16 +1355,29 @@ static const struct host1x_client_ops dc_client_ops = {
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
+ .supports_block_linear = false,
+ .pitch_align = 8,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
+ .supports_block_linear = false,
+ .pitch_align = 8,
+};
+
+static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
+ .supports_interlacing = false,
+ .supports_cursor = false,
+ .supports_block_linear = false,
+ .pitch_align = 64,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
+ .supports_block_linear = true,
+ .pitch_align = 64,
};
static const struct of_device_id tegra_dc_of_match[] = {
@@ -1303,6 +1394,7 @@ static const struct of_device_id tegra_dc_of_match[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, tegra_dc_of_match);
static int tegra_dc_parse_dt(struct tegra_dc *dc)
{
@@ -1430,6 +1522,7 @@ static int tegra_dc_remove(struct platform_device *pdev)
return err;
}
+ reset_control_assert(dc->rst);
clk_disable_unprepare(dc->clk);
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 78c5feff95d2..705c93b00794 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -428,6 +428,11 @@
#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
#define DC_WINBUF_UFLOW_STATUS 0x80a
+#define DC_WINBUF_SURFACE_KIND 0x80b
+#define DC_WINBUF_SURFACE_KIND_PITCH (0 << 0)
+#define DC_WINBUF_SURFACE_KIND_TILED (1 << 0)
+#define DC_WINBUF_SURFACE_KIND_BLOCK (2 << 0)
+#define DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(x) (((x) & 0x7) << 4)
#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 3f132e356e9c..708f783ead47 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -382,6 +382,7 @@ static const struct of_device_id tegra_dpaux_of_match[] = {
{ .compatible = "nvidia,tegra124-dpaux", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
struct platform_driver tegra_dpaux_driver = {
.driver = {
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 3396f9f6a9f7..59736bb810cd 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -40,6 +40,12 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm_mode_config_init(drm);
+ err = tegra_drm_fb_prepare(drm);
+ if (err < 0)
+ return err;
+
+ drm_kms_helper_poll_init(drm);
+
err = host1x_device_init(device);
if (err < 0)
return err;
@@ -59,8 +65,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (err < 0)
return err;
- drm_kms_helper_poll_init(drm);
-
return 0;
}
@@ -128,6 +132,45 @@ host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
return &bo->base;
}
+static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
+ struct drm_tegra_reloc __user *src,
+ struct drm_device *drm,
+ struct drm_file *file)
+{
+ u32 cmdbuf, target;
+ int err;
+
+ err = get_user(cmdbuf, &src->cmdbuf.handle);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
+ if (err < 0)
+ return err;
+
+ err = get_user(target, &src->target.handle);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->target.offset, &src->target.offset);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->shift, &src->shift);
+ if (err < 0)
+ return err;
+
+ dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf);
+ if (!dest->cmdbuf.bo)
+ return -ENOENT;
+
+ dest->target.bo = host1x_bo_lookup(drm, file, target);
+ if (!dest->target.bo)
+ return -ENOENT;
+
+ return 0;
+}
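The helper copies each field of the userspace struct drm_tegra_reloc individually and resolves the two GEM handles into host1x buffer objects. For reference, a hedged userspace-side sketch of one relocation entry as consumed by this code (the handles and offsets are made-up values):

/* Userspace sketch: patch the 32-bit word at byte 0x10 of the command
 * buffer with the address of 'target_handle' plus 0x100.
 */
struct drm_tegra_reloc reloc = {
	.cmdbuf = {
		.handle = cmdbuf_handle,	/* GEM handle of the command buffer */
		.offset = 0x10,			/* location of the word to patch */
	},
	.target = {
		.handle = target_handle,	/* GEM handle of the referenced buffer */
		.offset = 0x100,		/* offset added to its address */
	},
	.shift = 0,				/* right shift applied before patching */
};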
+
int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_submit *args, struct drm_device *drm,
struct drm_file *file)
@@ -180,26 +223,13 @@ int tegra_drm_submit(struct tegra_drm_context *context,
cmdbufs++;
}
- if (copy_from_user(job->relocarray, relocs,
- sizeof(*relocs) * num_relocs)) {
- err = -EFAULT;
- goto fail;
- }
-
+ /* copy and resolve relocations from submit */
while (num_relocs--) {
- struct host1x_reloc *reloc = &job->relocarray[num_relocs];
- struct host1x_bo *cmdbuf, *target;
-
- cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
- target = host1x_bo_lookup(drm, file, (u32)reloc->target);
-
- reloc->cmdbuf = cmdbuf;
- reloc->target = target;
-
- if (!reloc->target || !reloc->cmdbuf) {
- err = -ENOENT;
+ err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
+ &relocs[num_relocs], drm,
+ file);
+ if (err < 0)
goto fail;
- }
}
if (copy_from_user(job->waitchk, waitchks,
@@ -451,11 +481,151 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
return 0;
}
+
+static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_set_tiling *args = data;
+ enum tegra_bo_tiling_mode mode;
+ struct drm_gem_object *gem;
+ unsigned long value = 0;
+ struct tegra_bo *bo;
+
+ switch (args->mode) {
+ case DRM_TEGRA_GEM_TILING_MODE_PITCH:
+ mode = TEGRA_BO_TILING_MODE_PITCH;
+
+ if (args->value != 0)
+ return -EINVAL;
+
+ break;
+
+ case DRM_TEGRA_GEM_TILING_MODE_TILED:
+ mode = TEGRA_BO_TILING_MODE_TILED;
+
+ if (args->value != 0)
+ return -EINVAL;
+
+ break;
+
+ case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
+ mode = TEGRA_BO_TILING_MODE_BLOCK;
+
+ if (args->value > 5)
+ return -EINVAL;
+
+ value = args->value;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+
+ bo->tiling.mode = mode;
+ bo->tiling.value = value;
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
+
+static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_get_tiling *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+ int err = 0;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+
+ switch (bo->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
+ args->value = 0;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
+ args->value = 0;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+ args->value = bo->tiling.value;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ drm_gem_object_unreference(gem);
+
+ return err;
+}
+
+static int tegra_gem_set_flags(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_set_flags *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+ bo->flags = 0;
+
+ if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
+ bo->flags |= TEGRA_BO_BOTTOM_UP;
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
+
+static int tegra_gem_get_flags(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_get_flags *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+ args->flags = 0;
+
+ if (bo->flags & TEGRA_BO_BOTTOM_UP)
+ args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
#endif
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
@@ -465,6 +635,10 @@ static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED),
#endif
};
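The four new ioctls expose the per-buffer tiling mode and flags consumed by the display code above. A hedged userspace sketch, assuming the DRM_IOCTL_TEGRA_GEM_SET_TILING wrapper from the matching uapi header update; the handle would come from a prior GEM_CREATE:

#include <sys/ioctl.h>
#include <drm/tegra_drm.h>

/* Mark a buffer as block linear; the kernel rejects value > 5 (see
 * tegra_gem_set_tiling() above).
 */
static int example_set_block_linear(int fd, __u32 handle)
{
	struct drm_tegra_gem_set_tiling args = {
		.handle = handle,
		.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK,
		.value = 4,	/* block height parameter, 0..5 */
	};

	return ioctl(fd, DRM_IOCTL_TEGRA_GEM_SET_TILING, &args);
}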
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 6b8fe9d86ed4..e89c70fa82d5 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -19,6 +19,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
+#include "gem.h"
+
struct reset_control;
struct tegra_fb {
@@ -43,6 +45,8 @@ struct tegra_drm {
#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_fbdev *fbdev;
#endif
+
+ unsigned int pitch_align;
};
struct tegra_drm_client;
@@ -160,7 +164,8 @@ struct tegra_dc_window {
unsigned int stride[2];
unsigned long base[3];
bool bottom_up;
- bool tiled;
+
+ struct tegra_bo_tiling tiling;
};
/* from dc.c */
@@ -279,7 +284,9 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index);
bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
-bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
+int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
+ struct tegra_bo_tiling *tiling);
+int tegra_drm_fb_prepare(struct drm_device *drm);
int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
#ifdef CONFIG_DRM_TEGRA_FBDEV
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index bd56f2affa78..f7874458926a 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -474,7 +474,8 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
value = tegra_dsi_readl(dsi, DSI_CONTROL);
- value |= DSI_CONTROL_HS_CLK_CTRL;
+ if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ value |= DSI_CONTROL_HS_CLK_CTRL;
value &= ~DSI_CONTROL_TX_TRIG(3);
value &= ~DSI_CONTROL_DCS_ENABLE;
value |= DSI_CONTROL_VIDEO_ENABLE;
@@ -982,6 +983,7 @@ static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra114-dsi", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_dsi_of_match);
struct platform_driver tegra_dsi_driver = {
.driver = {
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 9798a7080322..3513d12d5aa1 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -46,14 +46,15 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
return false;
}
-bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
+int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
+ struct tegra_bo_tiling *tiling)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
- if (fb->planes[0]->flags & TEGRA_BO_TILED)
- return true;
+ /* TODO: handle YUV formats? */
+ *tiling = fb->planes[0]->tiling;
- return false;
+ return 0;
}
static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
@@ -193,6 +194,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
+ struct tegra_drm *tegra = helper->dev->dev_private;
struct drm_device *drm = helper->dev;
struct drm_mode_fb_cmd2 cmd = { 0 };
unsigned int bytes_per_pixel;
@@ -207,7 +209,8 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
cmd.width = sizes->surface_width;
cmd.height = sizes->surface_height;
- cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel,
+ tegra->pitch_align);
cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
@@ -267,18 +270,13 @@ release:
return err;
}
-static struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
.fb_probe = tegra_fbdev_probe,
};
-static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
- unsigned int preferred_bpp,
- unsigned int num_crtc,
- unsigned int max_connectors)
+static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
{
- struct drm_fb_helper *helper;
struct tegra_fbdev *fbdev;
- int err;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev) {
@@ -286,13 +284,23 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
return ERR_PTR(-ENOMEM);
}
- fbdev->base.funcs = &tegra_fb_helper_funcs;
- helper = &fbdev->base;
+ drm_fb_helper_prepare(drm, &fbdev->base, &tegra_fb_helper_funcs);
+
+ return fbdev;
+}
+
+static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
+ unsigned int preferred_bpp,
+ unsigned int num_crtc,
+ unsigned int max_connectors)
+{
+ struct drm_device *drm = fbdev->base.dev;
+ int err;
err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
if (err < 0) {
dev_err(drm->dev, "failed to initialize DRM FB helper\n");
- goto free;
+ return err;
}
err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
@@ -301,21 +309,17 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
goto fini;
}
- drm_helper_disable_unused_functions(drm);
-
err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
if (err < 0) {
dev_err(drm->dev, "failed to set initial configuration\n");
goto fini;
}
- return fbdev;
+ return 0;
fini:
drm_fb_helper_fini(&fbdev->base);
-free:
- kfree(fbdev);
- return ERR_PTR(err);
+ return err;
}
static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
@@ -366,7 +370,7 @@ static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
#endif
};
-int tegra_drm_fb_init(struct drm_device *drm)
+int tegra_drm_fb_prepare(struct drm_device *drm)
{
#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
@@ -381,8 +385,7 @@ int tegra_drm_fb_init(struct drm_device *drm)
drm->mode_config.funcs = &tegra_drm_mode_funcs;
#ifdef CONFIG_DRM_TEGRA_FBDEV
- tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
- drm->mode_config.num_connector);
+ tegra->fbdev = tegra_fbdev_create(drm);
if (IS_ERR(tegra->fbdev))
return PTR_ERR(tegra->fbdev);
#endif
@@ -390,6 +393,21 @@ int tegra_drm_fb_init(struct drm_device *drm)
return 0;
}
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
+
+ err = tegra_fbdev_init(tegra->fbdev, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (err < 0)
+ return err;
+#endif
+
+ return 0;
+}
+
void tegra_drm_fb_exit(struct drm_device *drm)
{
#ifdef CONFIG_DRM_TEGRA_FBDEV
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 78cc8143760a..ce023fa3e8ae 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -16,6 +16,7 @@
#include <linux/dma-buf.h>
#include <drm/tegra_drm.h>
+#include "drm.h"
#include "gem.h"
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
@@ -126,7 +127,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
goto err_mmap;
if (flags & DRM_TEGRA_GEM_CREATE_TILED)
- bo->flags |= TEGRA_BO_TILED;
+ bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
bo->flags |= TEGRA_BO_BOTTOM_UP;
@@ -259,8 +260,10 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;
+ min_pitch = round_up(min_pitch, tegra->pitch_align);
if (args->pitch < min_pitch)
args->pitch = min_pitch;
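Both the fbdev path and dumb-buffer creation now round the pitch up to the largest alignment advertised by any display controller (8 bytes on Tegra20/30, 64 bytes on Tegra114/124). A worked example of the arithmetic for a 1366-pixel-wide, 32 bpp dumb buffer on Tegra124:

	unsigned int width = 1366, bpp = 32, pitch_align = 64;
	unsigned int min_pitch = DIV_ROUND_UP(width * bpp, 8);	/* 5464 bytes */

	min_pitch = round_up(min_pitch, pitch_align);		/* 5504 bytes */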
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 2f3fe96c5154..43a25c853357 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -16,8 +16,18 @@
#include <drm/drm.h>
#include <drm/drmP.h>
-#define TEGRA_BO_TILED (1 << 0)
-#define TEGRA_BO_BOTTOM_UP (1 << 1)
+#define TEGRA_BO_BOTTOM_UP (1 << 0)
+
+enum tegra_bo_tiling_mode {
+ TEGRA_BO_TILING_MODE_PITCH,
+ TEGRA_BO_TILING_MODE_TILED,
+ TEGRA_BO_TILING_MODE_BLOCK,
+};
+
+struct tegra_bo_tiling {
+ enum tegra_bo_tiling_mode mode;
+ unsigned long value;
+};
struct tegra_bo {
struct drm_gem_object gem;
@@ -26,6 +36,8 @@ struct tegra_bo {
struct sg_table *sgt;
dma_addr_t paddr;
void *vaddr;
+
+ struct tegra_bo_tiling tiling;
};
static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 7c53941f2a9e..02cd3e37a6ec 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -121,6 +121,7 @@ static const struct of_device_id gr2d_match[] = {
{ .compatible = "nvidia,tegra20-gr2d" },
{ },
};
+MODULE_DEVICE_TABLE(of, gr2d_match);
static const u32 gr2d_addr_regs[] = {
GR2D_UA_BASE_ADDR,
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 30f5ba9bd6d0..0b3f2b977ba0 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -12,7 +12,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <linux/tegra-powergate.h>
+
+#include <soc/tegra/pmc.h>
#include "drm.h"
#include "gem.h"
@@ -130,6 +131,7 @@ static const struct of_device_id tegra_gr3d_match[] = {
{ .compatible = "nvidia,tegra20-gr3d" },
{ }
};
+MODULE_DEVICE_TABLE(of, tegra_gr3d_match);
static const u32 gr3d_addr_regs[] = {
GR3D_IDX_ATTRIBUTE( 0),
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index ba067bb767e3..ffe26547328d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1450,6 +1450,7 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
{ .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
static int tegra_hdmi_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index a3e4f1eca6f7..0c67d7eebc94 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -105,7 +105,7 @@ static void drm_connector_clear(struct drm_connector *connector)
static void tegra_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
drm_connector_clear(connector);
}
@@ -140,7 +140,9 @@ static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
if (mode != DRM_MODE_DPMS_ON) {
drm_panel_disable(panel);
tegra_output_disable(output);
+ drm_panel_unprepare(panel);
} else {
+ drm_panel_prepare(panel);
tegra_output_enable(output);
drm_panel_enable(panel);
}
@@ -318,7 +320,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
- drm_sysfs_connector_add(&output->connector);
+ drm_connector_register(&output->connector);
output->encoder.possible_crtcs = 0x3;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 27c979b50111..7829e81f065d 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -11,7 +11,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
-#include <linux/tegra-powergate.h>
+
+#include <soc/tegra/pmc.h>
#include <drm/drm_dp_helper.h>
@@ -516,7 +517,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
if (err < 0) {
dev_err(sor->dev, "failed to probe eDP link: %d\n",
err);
- return err;
+ goto unlock;
}
}
@@ -525,7 +526,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
memset(&config, 0, sizeof(config));
- config.bits_per_pixel = 24; /* XXX: don't hardcode? */
+ config.bits_per_pixel = output->connector.display_info.bpc * 3;
err = tegra_sor_calc_config(sor, mode, &config, &link);
if (err < 0)
@@ -815,12 +816,22 @@ static int tegra_output_sor_enable(struct tegra_output *output)
* configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
* raster, associate with display controller)
*/
- value = SOR_STATE_ASY_VSYNCPOL |
- SOR_STATE_ASY_HSYNCPOL |
- SOR_STATE_ASY_PROTOCOL_DP_A |
+ value = SOR_STATE_ASY_PROTOCOL_DP_A |
SOR_STATE_ASY_CRC_MODE_COMPLETE |
SOR_STATE_ASY_OWNER(dc->pipe + 1);
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL;
+
switch (config.bits_per_pixel) {
case 24:
value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
@@ -1455,6 +1466,7 @@ static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra124-sor", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
struct platform_driver tegra_sor_driver = {
.driver = {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index b20b69488dc9..6be623b4a86f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -120,8 +120,8 @@ static int cpufreq_transition(struct notifier_block *nb,
static int tilcdc_unload(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- struct tilcdc_module *mod, *cur;
+ drm_fbdev_cma_fini(priv->fbdev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
@@ -148,11 +148,6 @@ static int tilcdc_unload(struct drm_device *dev)
pm_runtime_disable(dev->dev);
- list_for_each_entry_safe(mod, cur, &module_list, list) {
- DBG("destroying module: %s", mod->name);
- mod->funcs->destroy(mod);
- }
-
kfree(priv);
return 0;
@@ -628,13 +623,13 @@ static int __init tilcdc_drm_init(void)
static void __exit tilcdc_drm_fini(void)
{
DBG("fini");
- tilcdc_tfp410_fini();
- tilcdc_slave_fini();
- tilcdc_panel_fini();
platform_driver_unregister(&tilcdc_platform_driver);
+ tilcdc_panel_fini();
+ tilcdc_slave_fini();
+ tilcdc_tfp410_fini();
}
-late_initcall(tilcdc_drm_init);
+module_init(tilcdc_drm_init);
module_exit(tilcdc_drm_fini);
MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 093803683b25..7596c144a9fb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -98,7 +98,6 @@ struct tilcdc_module;
struct tilcdc_module_ops {
/* create appropriate encoders/connectors: */
int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev);
- void (*destroy)(struct tilcdc_module *mod);
#ifdef CONFIG_DEBUG_FS
/* create debugfs nodes (can be NULL): */
int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 86c67329b605..4c7aa1d8134f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -151,6 +151,7 @@ struct panel_connector {
static void panel_connector_destroy(struct drm_connector *connector)
{
struct panel_connector *panel_connector = to_panel_connector(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(panel_connector);
}
@@ -247,7 +248,7 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
@@ -281,23 +282,8 @@ static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
return 0;
}
-static void panel_destroy(struct tilcdc_module *mod)
-{
- struct panel_module *panel_mod = to_panel_module(mod);
-
- if (panel_mod->timings) {
- display_timings_release(panel_mod->timings);
- kfree(panel_mod->timings);
- }
-
- tilcdc_module_cleanup(mod);
- kfree(panel_mod->info);
- kfree(panel_mod);
-}
-
static const struct tilcdc_module_ops panel_module_ops = {
.modeset_init = panel_modeset_init,
- .destroy = panel_destroy,
};
/*
@@ -373,6 +359,7 @@ static int panel_probe(struct platform_device *pdev)
return -ENOMEM;
mod = &panel_mod->base;
+ pdev->dev.platform_data = mod;
tilcdc_module_init(mod, "panel", &panel_module_ops);
@@ -380,17 +367,16 @@ static int panel_probe(struct platform_device *pdev)
if (IS_ERR(pinctrl))
dev_warn(&pdev->dev, "pins are not configured\n");
-
panel_mod->timings = of_get_display_timings(node);
if (!panel_mod->timings) {
dev_err(&pdev->dev, "could not get panel timings\n");
- goto fail;
+ goto fail_free;
}
panel_mod->info = of_get_panel_info(node);
if (!panel_mod->info) {
dev_err(&pdev->dev, "could not get panel info\n");
- goto fail;
+ goto fail_timings;
}
mod->preferred_bpp = panel_mod->info->bpp;
@@ -401,13 +387,26 @@ static int panel_probe(struct platform_device *pdev)
return 0;
-fail:
- panel_destroy(mod);
+fail_timings:
+ display_timings_release(panel_mod->timings);
+
+fail_free:
+ kfree(panel_mod);
+ tilcdc_module_cleanup(mod);
return ret;
}
static int panel_remove(struct platform_device *pdev)
{
+ struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
+ struct panel_module *panel_mod = to_panel_module(mod);
+
+ display_timings_release(panel_mod->timings);
+
+ tilcdc_module_cleanup(mod);
+ kfree(panel_mod->info);
+ kfree(panel_mod);
+
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index 595068ba2d5e..3775fd49dac4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -166,6 +166,7 @@ struct slave_connector {
static void slave_connector_destroy(struct drm_connector *connector)
{
struct slave_connector *slave_connector = to_slave_connector(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(slave_connector);
}
@@ -261,7 +262,7 @@ static struct drm_connector *slave_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
@@ -295,17 +296,8 @@ static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
return 0;
}
-static void slave_destroy(struct tilcdc_module *mod)
-{
- struct slave_module *slave_mod = to_slave_module(mod);
-
- tilcdc_module_cleanup(mod);
- kfree(slave_mod);
-}
-
static const struct tilcdc_module_ops slave_module_ops = {
.modeset_init = slave_modeset_init,
- .destroy = slave_destroy,
};
/*
@@ -355,10 +347,13 @@ static int slave_probe(struct platform_device *pdev)
}
slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
- if (!slave_mod)
- return -ENOMEM;
+ if (!slave_mod) {
+ ret = -ENOMEM;
+ goto fail_adapter;
+ }
mod = &slave_mod->base;
+ pdev->dev.platform_data = mod;
mod->preferred_bpp = slave_info.bpp;
@@ -373,10 +368,20 @@ static int slave_probe(struct platform_device *pdev)
tilcdc_slave_probedefer(false);
return 0;
+
+fail_adapter:
+ i2c_put_adapter(slavei2c);
+ return ret;
}
static int slave_remove(struct platform_device *pdev)
{
+ struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
+ struct slave_module *slave_mod = to_slave_module(mod);
+
+ tilcdc_module_cleanup(mod);
+ kfree(slave_mod);
+
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index c38b56b268ac..354c47ca6374 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -167,6 +167,7 @@ struct tfp410_connector {
static void tfp410_connector_destroy(struct drm_connector *connector)
{
struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(tfp410_connector);
}
@@ -261,7 +262,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
@@ -295,23 +296,8 @@ static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev
return 0;
}
-static void tfp410_destroy(struct tilcdc_module *mod)
-{
- struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
-
- if (tfp410_mod->i2c)
- i2c_put_adapter(tfp410_mod->i2c);
-
- if (!IS_ERR_VALUE(tfp410_mod->gpio))
- gpio_free(tfp410_mod->gpio);
-
- tilcdc_module_cleanup(mod);
- kfree(tfp410_mod);
-}
-
static const struct tilcdc_module_ops tfp410_module_ops = {
.modeset_init = tfp410_modeset_init,
- .destroy = tfp410_destroy,
};
/*
@@ -341,6 +327,7 @@ static int tfp410_probe(struct platform_device *pdev)
return -ENOMEM;
mod = &tfp410_mod->base;
+ pdev->dev.platform_data = mod;
tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
@@ -364,6 +351,7 @@ static int tfp410_probe(struct platform_device *pdev)
tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
if (!tfp410_mod->i2c) {
dev_err(&pdev->dev, "could not get i2c\n");
+ of_node_put(i2c_node);
goto fail;
}
@@ -377,19 +365,32 @@ static int tfp410_probe(struct platform_device *pdev)
ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
if (ret) {
dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
- goto fail;
+ goto fail_adapter;
}
}
return 0;
+fail_adapter:
+ i2c_put_adapter(tfp410_mod->i2c);
+
fail:
- tfp410_destroy(mod);
+ kfree(tfp410_mod);
+ tilcdc_module_cleanup(mod);
return ret;
}
static int tfp410_remove(struct platform_device *pdev)
{
+ struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
+ struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+
+ i2c_put_adapter(tfp410_mod->i2c);
+ gpio_free(tfp410_mod->gpio);
+
+ tilcdc_module_cleanup(mod);
+ kfree(tfp410_mod);
+
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4ab9f7171c4f..3da89d5dab60 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int ret;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return ret;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
/*
* We raced, and lost, someone else holds the reservation now,
@@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- ret = __ttm_bo_reserve(entry, false, true, false, 0);
+ ret = __ttm_bo_reserve(entry, false, true, false, NULL);
if (remove_all && ret) {
spin_unlock(&glob->lru_lock);
ret = __ttm_bo_reserve(entry, false, false,
- false, 0);
+ false, NULL);
spin_lock(&glob->lru_lock);
}
@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
- ret = __ttm_bo_reserve(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
if (!ret)
break;
}
@@ -784,7 +784,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
int ret;
do {
- ret = (*man->func->get_node)(man, bo, placement, mem);
+ ret = (*man->func->get_node)(man, bo, placement, 0, mem);
if (unlikely(ret != 0))
return ret;
if (mem->mm_node)
@@ -897,7 +897,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (man->has_type && man->use_type) {
type_found = true;
- ret = (*man->func->get_node)(man, bo, placement, mem);
+ ret = (*man->func->get_node)(man, bo, placement,
+ cur_flags, mem);
if (unlikely(ret))
return ret;
}
@@ -937,7 +938,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
ttm_flag_masked(&cur_flags, placement->busy_placement[i],
~TTM_PL_MASK_MEMTYPE);
-
if (mem_type == TTM_PL_SYSTEM) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
@@ -1595,7 +1595,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
* Using ttm_bo_reserve makes sure the lru lists are updated.
*/
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
if (unlikely(ret != 0))
return ret;
spin_lock(&bdev->fence_lock);
@@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = __ttm_bo_reserve(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
if (!ret)
break;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index bd850c9f4bca..9e103a4875c8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -50,6 +50,7 @@ struct ttm_range_manager {
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -67,7 +68,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (!node)
return -ENOMEM;
- if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
+ if (flags & TTM_PL_FLAG_TOPDOWN)
aflags = DRM_MM_CREATE_TOP;
spin_lock(&rman->lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1df856f78568..30e5d90cb7bc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -500,7 +500,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
pgprot_val(tmp) |= _PAGE_GUARDED;
}
#endif
-#if defined(__ia64__)
+#if defined(__ia64__) || defined(__arm__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index d7f92fe9d904..66fc6395eb54 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -35,7 +35,7 @@
#include <drm/drm_sysfs.h>
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-atomic_t device_released;
+static atomic_t device_released;
static struct device_type ttm_drm_class_type = {
.name = "ttm",
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9f9234..09874d695188 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
*
* @pool: to free the pages from
* @free_all: If set to true will free all pages in pool
+ * @gfp: GFP flags.
**/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+ gfp_t gfp)
{
unsigned long irq_flags;
struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
@@ -382,32 +383,35 @@ out:
*
* XXX: (dchinner) Deadlock warning!
*
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
- * this can deadlock when called a sc->gfp_mask that is not equal to
- * GFP_KERNEL.
+ * We need to pass sc->gfp_mask to ttm_page_pool_free().
*
* This code is crying out for a shrinker per pool....
*/
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static DEFINE_MUTEX(lock);
+ static unsigned start_pool;
unsigned i;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;
- pool_offset = pool_offset % NUM_POOLS;
+ if (!mutex_trylock(&lock))
+ return SHRINK_STOP;
+ pool_offset = ++start_pool % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- shrink_pages = ttm_page_pool_free(pool, nr_free);
+ shrink_pages = ttm_page_pool_free(pool, nr_free,
+ sc->gfp_mask);
freed += nr_free - shrink_pages;
}
+ mutex_unlock(&lock);
return freed;
}
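Shrinker callbacks can be invoked from reclaim triggered by the pool's own allocations, which is why the scan now takes the lock with mutex_trylock() and bails out with SHRINK_STOP, and why the caller's GFP mask is propagated into ttm_page_pool_free(). The general pattern in isolation, as a sketch rather than the exact TTM code:

static unsigned long example_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	unsigned long freed = 0;

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;	/* don't block or recurse in reclaim */

	/* free up to sc->nr_to_scan objects, passing sc->gfp_mask to any
	 * allocation made on the way, and count them in 'freed' */

	mutex_unlock(&lock);
	return freed;
}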
@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
- ttm_page_pool_free(pool, npages);
+ ttm_page_pool_free(pool, npages, GFP_KERNEL);
}
/*
@@ -790,7 +794,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
return 0;
}
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
char *name)
{
spin_lock_init(&pool->lock);
@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+ GFP_KERNEL);
kobject_put(&_manager->kobj);
_manager = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index fb8259f69839..c96db433f8af 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
*
* @pool: to free the pages from
* @nr_free: If set to true will free all pages in pool
+ * @gfp: GFP flags.
**/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+ gfp_t gfp)
{
unsigned long irq_flags;
struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
npages_to_free, nr_free);
}
#endif
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
if (!pages_to_free) {
pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
if (pool->type != type)
continue;
/* Takes a spinlock.. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
/* This code path is called after _all_ references to the
* struct device has been dropped - so nobody should be
@@ -847,6 +848,7 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
if (count) {
d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
ttm->pages[index] = d_page->p;
+ ttm_dma->cpu_address[index] = d_page->vaddr;
ttm_dma->dma_address[index] = d_page->dma;
list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
r = 0;
@@ -978,12 +980,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
INIT_LIST_HEAD(&ttm_dma->pages_list);
for (i = 0; i < ttm->num_pages; i++) {
ttm->pages[i] = NULL;
+ ttm_dma->cpu_address[i] = 0;
ttm_dma->dma_address[i] = 0;
}
/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
- ttm_dma_page_pool_free(pool, npages);
+ ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -993,10 +996,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
*
* XXX: (dchinner) Deadlock warning!
*
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
- * bad.
+ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
*
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
@@ -1004,9 +1004,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static unsigned start_pool;
unsigned idx = 0;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
unsigned long freed = 0;
@@ -1014,8 +1014,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (list_empty(&_manager->pools))
return SHRINK_STOP;
- mutex_lock(&_manager->lock);
- pool_offset = pool_offset % _manager->npools;
+ if (!mutex_trylock(&_manager->lock))
+ return SHRINK_STOP;
+ if (!_manager->npools)
+ goto out;
+ pool_offset = ++start_pool % _manager->npools;
list_for_each_entry(p, &_manager->pools, pools) {
unsigned nr_free;
@@ -1027,13 +1030,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (++idx < pool_offset)
continue;
nr_free = shrink_pages;
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+ sc->gfp_mask);
freed += nr_free - shrink_pages;
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
+out:
mutex_unlock(&_manager->lock);
return freed;
}
@@ -1044,7 +1049,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
struct device_pools *p;
unsigned long count = 0;
- mutex_lock(&_manager->lock);
+ if (!mutex_trylock(&_manager->lock))
+ return 0;
list_for_each_entry(p, &_manager->pools, pools)
count += p->pool->npages_free;
mutex_unlock(&_manager->lock);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 75f319090043..bf080abc86d1 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -55,9 +55,12 @@ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
- ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
- ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
- sizeof(*ttm->dma_address));
+ ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
+ sizeof(*ttm->ttm.pages) +
+ sizeof(*ttm->dma_address) +
+ sizeof(*ttm->cpu_address));
+ ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+ ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
}
#ifdef CONFIG_X86
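The three per-page arrays are now carved out of a single drm_calloc_large() allocation, which is why ttm_dma_tt_fini() below frees only ttm->pages and merely clears the other two pointers. The resulting layout, sketched for num_pages = N:

/*
 *   ttm->ttm.pages    -> N x (struct page *)
 *   ttm->cpu_address  -> N x (void *)        == pages + N
 *   ttm->dma_address  -> N x (dma_addr_t)    == cpu_address + N
 *
 * Only the base pointer, ttm->ttm.pages, is handed back to drm_free_large().
 */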
@@ -228,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
INIT_LIST_HEAD(&ttm_dma->pages_list);
ttm_dma_tt_alloc_page_directory(ttm_dma);
- if (!ttm->pages || !ttm_dma->dma_address) {
+ if (!ttm->pages) {
ttm_tt_destroy(ttm);
pr_err("Failed allocating page table\n");
return -ENOMEM;
@@ -243,7 +246,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
drm_free_large(ttm->pages);
ttm->pages = NULL;
- drm_free_large(ttm_dma->dma_address);
+ ttm_dma->cpu_address = NULL;
ttm_dma->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b44d548c56f8..e026a9e2942a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -105,14 +105,7 @@ static struct drm_encoder*
udl_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
+ return drm_encoder_find(connector->dev, enc_id);
}
static int udl_connector_set_property(struct drm_connector *connector,
@@ -124,7 +117,7 @@ static int udl_connector_set_property(struct drm_connector *connector,
static void udl_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -154,7 +147,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
drm_object_attach_property(&connector->base,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 377176372da8..d1da339843ca 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -550,7 +550,7 @@ out:
return ret;
}
-static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
.fb_probe = udlfb_create,
};
@@ -583,7 +583,8 @@ int udl_fbdev_init(struct drm_device *dev)
return -ENOMEM;
udl->fbdev = ufbdev;
- ufbdev->helper.funcs = &udl_fb_helper_funcs;
+
+ drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
ret = drm_fb_helper_init(dev, &ufbdev->helper,
1, 1);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c041cd73f399..8044f5fb7c49 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
-static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+static int udl_gem_get_pages(struct udl_gem_object *obj)
{
struct page **pages;
if (obj->pages)
return 0;
- pages = drm_gem_get_pages(&obj->base, gfpmask);
+ pages = drm_gem_get_pages(&obj->base);
if (IS_ERR(pages))
return PTR_ERR(pages);
@@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj)
return 0;
}
- ret = udl_gem_get_pages(obj, GFP_KERNEL);
+ ret = udl_gem_get_pages(obj);
if (ret)
return ret;
@@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
}
gobj = to_udl_bo(obj);
- ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+ ret = udl_gem_get_pages(gobj);
if (ret)
goto out;
ret = drm_gem_create_mmap_offset(obj);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 7094b92d1ec7..42795674bc07 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -306,10 +306,23 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG("\n");
ret = udl_modeset_init(dev);
+ if (ret)
+ goto err;
ret = udl_fbdev_init(dev);
+ if (ret)
+ goto err;
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ goto err_fb;
+
return 0;
+err_fb:
+ udl_fbdev_cleanup(dev);
err:
+ if (udl->urbs.count)
+ udl_free_urb_list(dev);
kfree(udl);
DRM_ERROR("%d\n", ret);
return ret;
@@ -325,6 +338,8 @@ int udl_driver_unload(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
+ drm_vblank_cleanup(dev);
+
if (udl->urbs.count)
udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index cddc4fcf35cf..dc145d320b25 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -363,6 +363,26 @@ static void udl_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
+static int udl_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct udl_framebuffer *ufb = to_udl_fb(fb);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (event)
+ drm_send_vblank_event(dev, 0, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ crtc->primary->fb = fb;
+
+ return 0;
+}
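With a .page_flip hook (and the vblank initialization added in udl_main.c below), the standard KMS page-flip ioctl now works on udl; the damage is flushed synchronously and the completion event is sent immediately. A hedged userspace sketch using libdrm:

#include <xf86drmMode.h>

/* Request a flip to new_fb_id on crtc_id; DRM_MODE_PAGE_FLIP_EVENT makes
 * the kernel deliver a completion event carrying user_data, collected
 * later with drmHandleEvent().
 */
static int example_flip(int fd, uint32_t crtc_id, uint32_t new_fb_id,
			void *user_data)
{
	return drmModePageFlip(fd, crtc_id, new_fb_id,
			       DRM_MODE_PAGE_FLIP_EVENT, user_data);
}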
+
static void udl_crtc_prepare(struct drm_crtc *crtc)
{
}
@@ -384,6 +404,7 @@ static struct drm_crtc_helper_funcs udl_helper_funcs = {
static const struct drm_crtc_funcs udl_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.destroy = udl_crtc_destroy,
+ .page_flip = udl_crtc_page_flip,
};
static int udl_crtc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 458cdf6d81e8..ce0ab951f507 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
+ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
+ vmwgfx_cmdbuf_res.o \
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
new file mode 100644
index 000000000000..bfeb4b1f2acf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -0,0 +1,341 @@
+/**************************************************************************
+ *
+ * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
+
+enum vmw_cmdbuf_res_state {
+ VMW_CMDBUF_RES_COMMITED,
+ VMW_CMDBUF_RES_ADD,
+ VMW_CMDBUF_RES_DEL
+};
+
+/**
+ * struct vmw_cmdbuf_res - Command buffer managed resource entry.
+ *
+ * @res: Refcounted pointer to a struct vmw_resource.
+ * @hash: Hash entry for the manager hash table.
+ * @head: List head used either by the staging list or the manager list
+ * of committed resources.
+ * @state: Staging state of this resource entry.
+ * @man: Pointer to a resource manager for this entry.
+ */
+struct vmw_cmdbuf_res {
+ struct vmw_resource *res;
+ struct drm_hash_item hash;
+ struct list_head head;
+ enum vmw_cmdbuf_res_state state;
+ struct vmw_cmdbuf_res_manager *man;
+};
+
+/**
+ * struct vmw_cmdbuf_res_manager - Command buffer resource manager.
+ *
+ * @resources: Hash table containing staged and committed command buffer
+ * resources
+ * @list: List of committed command buffer resources.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @resources and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_cmdbuf_res_manager {
+ struct drm_open_hash resources;
+ struct list_head list;
+ struct vmw_private *dev_priv;
+};
+
+
+/**
+ * vmw_cmdbuf_res_lookup - Look up a command buffer resource
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @resource_type: The resource type, that combined with the user key
+ * identifies the resource.
+ * @user_key: The user key.
+ *
+ * Returns a valid refcounted struct vmw_resource pointer on success,
+ * an error pointer on failure.
+ */
+struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key)
+{
+ struct drm_hash_item *hash;
+ int ret;
+ unsigned long key = user_key | (res_type << 24);
+
+ ret = drm_ht_find_item(&man->resources, key, &hash);
+ if (unlikely(ret != 0))
+ return ERR_PTR(ret);
+
+ return vmw_resource_reference
+ (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+}
+
+/**
+ * vmw_cmdbuf_res_free - Free a command buffer resource.
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @entry: Pointer to a struct vmw_cmdbuf_res.
+ *
+ * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
+ * struct vmw_resource.
+ */
+static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_cmdbuf_res *entry)
+{
+ list_del(&entry->head);
+ WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+ vmw_resource_unreference(&entry->res);
+ kfree(entry);
+}
+
+/**
+ * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
+ *
+ * @list: Caller's list of command buffer resource actions.
+ *
+ * This function commits a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_cmdbuf_res_commit(struct list_head *list)
+{
+ struct vmw_cmdbuf_res *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ list_del(&entry->head);
+ switch (entry->state) {
+ case VMW_CMDBUF_RES_ADD:
+ entry->state = VMW_CMDBUF_RES_COMMITED;
+ list_add_tail(&entry->head, &entry->man->list);
+ break;
+ case VMW_CMDBUF_RES_DEL:
+ vmw_resource_unreference(&entry->res);
+ kfree(entry);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @list: Caller's list of command buffer resource action
+ *
+ * This function reverts a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_cmdbuf_res_revert(struct list_head *list)
+{
+ struct vmw_cmdbuf_res *entry, *next;
+ int ret;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ switch (entry->state) {
+ case VMW_CMDBUF_RES_ADD:
+ vmw_cmdbuf_res_free(entry->man, entry);
+ break;
+ case VMW_CMDBUF_RES_DEL:
+ ret = drm_ht_insert_item(&entry->man->resources,
+ &entry->hash);
+ list_del(&entry->head);
+ list_add_tail(&entry->head, &entry->man->list);
+ entry->state = VMW_CMDBUF_RES_COMMITED;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @res: Valid (refcount != 0) pointer to a struct vmw_resource.
+ * @list: The staging list.
+ *
+ * This function allocates a struct vmw_cmdbuf_res entry and adds the
+ * resource to the hash table of the manager identified by @man. The
+ * entry is then put on the staging list identified by @list.
+ */
+int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct vmw_resource *res,
+ struct list_head *list)
+{
+ struct vmw_cmdbuf_res *cres;
+ int ret;
+
+ cres = kzalloc(sizeof(*cres), GFP_KERNEL);
+ if (unlikely(cres == NULL))
+ return -ENOMEM;
+
+ cres->hash.key = user_key | (res_type << 24);
+ ret = drm_ht_insert_item(&man->resources, &cres->hash);
+ if (unlikely(ret != 0))
+ goto out_invalid_key;
+
+ cres->state = VMW_CMDBUF_RES_ADD;
+ cres->res = vmw_resource_reference(res);
+ cres->man = man;
+ list_add_tail(&cres->head, list);
+
+out_invalid_key:
+ return ret;
+}
+
+/**
+ * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @list: The staging list.
+ *
+ * This function looks up the struct vmw_cmdbuf_res entry from the manager
+ * hash table and, if it exists, removes it. Depending on its current staging
+ * state it then either removes the entry from the staging list or adds it
+ * to it with a staging state of removal.
+ */
+int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct list_head *list)
+{
+ struct vmw_cmdbuf_res *entry;
+ struct drm_hash_item *hash;
+ int ret;
+
+ ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24), &hash);
+ if (likely(ret != 0))
+ return -EINVAL;
+
+ entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
+
+ switch (entry->state) {
+ case VMW_CMDBUF_RES_ADD:
+ vmw_cmdbuf_res_free(man, entry);
+ break;
+ case VMW_CMDBUF_RES_COMMITED:
+ (void) drm_ht_remove_item(&man->resources, &entry->hash);
+ list_del(&entry->head);
+ entry->state = VMW_CMDBUF_RES_DEL;
+ list_add_tail(&entry->head, list);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
+ * manager.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * Allocates and initializes a command buffer managed resource manager. Returns
+ * an error pointer on failure.
+ */
+struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_cmdbuf_res_manager *man;
+ int ret;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (man == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ man->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&man->list);
+ ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+ if (ret == 0)
+ return man;
+
+ kfree(man);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
+ * manager.
+ *
+ * @man: Pointer to the manager to destroy.
+ *
+ * This function destroys a command buffer managed resource manager and
+ * unreferences / frees all command buffer managed resources and entries
+ * associated with it.
+ */
+void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
+{
+ struct vmw_cmdbuf_res *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &man->list, head)
+ vmw_cmdbuf_res_free(man, entry);
+
+ kfree(man);
+}
+
+/**
+ * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
+ * resource manager
+ *
+ * Returns the approximate allocation size of a command buffer managed
+ * resource manager.
+ */
+size_t vmw_cmdbuf_res_man_size(void)
+{
+ static size_t res_man_size;
+
+ if (unlikely(res_man_size == 0))
+ res_man_size =
+ ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
+ ttm_round_pot(sizeof(struct hlist_head) <<
+ VMW_CMDBUF_RES_MAN_HT_ORDER);
+
+ return res_man_size;
+}
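The file above introduces a small transactional API: callers stage additions and removals on a private list under the cmdbuf mutex, then either commit the list once the FIFO submission has succeeded or revert it if submission fails. A rough sketch of a caller follows; it is illustrative only, with the surrounding setup ("man", "user_key", "res") assumed rather than taken from the patch:

    /*
     * Hypothetical caller staging one compat shader addition. The cmdbuf
     * mutex is assumed to be held across staging and commit/revert.
     */
    static int example_stage_and_submit(struct vmw_cmdbuf_res_manager *man,
                                        u32 user_key, struct vmw_resource *res)
    {
            struct list_head staged;
            int ret;

            INIT_LIST_HEAD(&staged);

            /* Stage the addition; the entry also enters the manager's hash table. */
            ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
                                     user_key, res, &staged);
            if (ret)
                    goto out_revert;

            /* ... build and submit the command stream to the device here ... */

            vmw_cmdbuf_res_commit(&staged);   /* device accepted the commands */
            return 0;

    out_revert:
            vmw_cmdbuf_res_revert(&staged);   /* undo anything staged so far */
            return ret;
    }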
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 8bb26dcd9eae..5ac92874404d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -33,6 +33,7 @@ struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
struct vmw_ctx_binding_state cbs;
+ struct vmw_cmdbuf_res_manager *man;
};
@@ -103,7 +104,8 @@ static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
-
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
@@ -113,9 +115,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
if (res->func->destroy == vmw_gb_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
+ vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
- (void) vmw_context_binding_state_kill
- (&container_of(res, struct vmw_user_context, res)->cbs);
+ (void) vmw_context_binding_state_kill(&uctx->cbs);
(void) vmw_gb_context_destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
@@ -152,13 +154,16 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
ret = vmw_resource_init(dev_priv, res, true,
res_free, &vmw_gb_context_func);
res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+ if (unlikely(ret != 0))
+ goto out_err;
- if (unlikely(ret != 0)) {
- if (res_free)
- res_free(res);
- else
- kfree(res);
- return ret;
+ if (dev_priv->has_mob) {
+ uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
+ if (unlikely(IS_ERR(uctx->man))) {
+ ret = PTR_ERR(uctx->man);
+ uctx->man = NULL;
+ goto out_err;
+ }
}
memset(&uctx->cbs, 0, sizeof(uctx->cbs));
@@ -166,6 +171,13 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
+
+out_err:
+ if (res_free)
+ res_free(res);
+ else
+ kfree(res);
+ return ret;
}
static int vmw_context_init(struct vmw_private *dev_priv,
@@ -471,7 +483,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
*/
if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
+ ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
@@ -901,3 +914,8 @@ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
+
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
+{
+ return container_of(ctx, struct vmw_user_context, res)->man;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 70ddce8358b0..ed1d51006ab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -61,7 +61,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -212,7 +212,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 246a62bab378..18b54acacfbb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -316,7 +316,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(bo, false, true, false, 0);
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
BUG_ON(ret != 0);
ret = ttm_bo_kmap(bo, 0, 1, &map);
@@ -946,7 +946,6 @@ static void vmw_postclose(struct drm_device *dev,
drm_master_put(&vmw_fp->locked_master);
}
- vmw_compat_shader_man_destroy(vmw_fp->shman);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -966,16 +965,10 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
- vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
- if (IS_ERR(vmw_fp->shman))
- goto out_no_shman;
-
file_priv->driver_priv = vmw_fp;
return 0;
-out_no_shman:
- ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
kfree(vmw_fp);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index c886c024c637..99f731757c4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,10 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20140325"
+#define VMWGFX_DRIVER_DATE "20140704"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -75,14 +75,11 @@
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
-struct vmw_compat_shader_manager;
-
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
bool gb_aware;
- struct vmw_compat_shader_manager *shman;
};
struct vmw_dma_buffer {
@@ -124,6 +121,10 @@ struct vmw_resource {
void (*hw_destroy) (struct vmw_resource *res);
};
+
+/*
+ * Resources that are managed using ioctls.
+ */
enum vmw_res_type {
vmw_res_context,
vmw_res_surface,
@@ -132,6 +133,15 @@ enum vmw_res_type {
vmw_res_max
};
+/*
+ * Resources that are managed using command streams.
+ */
+enum vmw_cmdbuf_res_type {
+ vmw_cmdbuf_res_compat_shader
+};
+
+struct vmw_cmdbuf_res_manager;
+
struct vmw_cursor_snooper {
struct drm_crtc *crtc;
size_t age;
@@ -341,7 +351,7 @@ struct vmw_sw_context{
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
- struct list_head staged_shaders;
+ struct list_head staged_cmd_res;
};
struct vmw_legacy_display;
@@ -974,7 +984,8 @@ extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
-
+extern struct vmw_cmdbuf_res_manager *
+vmw_context_res_man(struct vmw_resource *ctx);
/*
* Surface management - vmwgfx_surface.c
*/
@@ -1008,27 +1019,42 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
- SVGA3dShaderType shader_type,
- u32 *user_key);
-extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
- struct list_head *list);
-extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
- struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
- u32 user_key,
- SVGA3dShaderType shader_type,
- struct list_head *list);
-extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
+ struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
- struct ttm_object_file *tfile,
struct list_head *list);
-extern struct vmw_compat_shader_manager *
-vmw_compat_shader_man_create(struct vmw_private *dev_priv);
-extern void
-vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern struct vmw_resource *
+vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type);
+
+/*
+ * Command buffer managed resources - vmwgfx_cmdbuf_res.c
+ */
+
+extern struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
+extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
+extern size_t vmw_cmdbuf_res_man_size(void);
+extern struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key);
+extern void vmw_cmdbuf_res_revert(struct list_head *list);
+extern void vmw_cmdbuf_res_commit(struct list_head *list);
+extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct vmw_resource *res,
+ struct list_head *list);
+extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct list_head *list);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 87df0b3674fd..7bfdaa163a33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -422,28 +422,91 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
return 0;
}
+
+/**
+ * vmw_cmd_res_reloc_add - Add a resource to a software context's
+ * relocation- and validation lists.
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @id_loc: Pointer to where the id that needs translation is located.
+ * @res: Valid pointer to a struct vmw_resource.
+ * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
+ * used for this resource is returned here.
+ */
+static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ uint32_t *id_loc,
+ struct vmw_resource *res,
+ struct vmw_resource_val_node **p_val)
+{
+ int ret;
+ struct vmw_resource_val_node *node;
+
+ *p_val = NULL;
+ ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+ res,
+ id_loc - sw_context->buf_start);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ ret = vmw_resource_val_add(sw_context, res, &node);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ if (res_type == vmw_res_context && dev_priv->has_mob &&
+ node->first_usage) {
+
+ /*
+ * Put contexts first on the list to be able to exit
+ * list traversal for contexts early.
+ */
+ list_del(&node->head);
+ list_add(&node->head, &sw_context->resource_list);
+
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+ if (unlikely(ret != 0))
+ goto out_err;
+ node->staged_bindings =
+ kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+ if (node->staged_bindings == NULL) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ goto out_err;
+ }
+ INIT_LIST_HEAD(&node->staged_bindings->list);
+ }
+
+ if (p_val)
+ *p_val = node;
+
+out_err:
+ return ret;
+}
+
+
/**
- * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visisble type specific information.
- * @id: user-space resource id handle.
* @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validalidation node. Populated
* on exit.
*/
static int
-vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t id,
- uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
@@ -451,7 +514,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node;
int ret;
- if (id == SVGA3D_INVALID_ID) {
+ if (*id_loc == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
@@ -466,7 +529,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
* resource
*/
- if (likely(rcache->valid && id == rcache->handle)) {
+ if (likely(rcache->valid && *id_loc == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
@@ -480,49 +543,28 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
ret = vmw_user_resource_lookup_handle(dev_priv,
sw_context->fp->tfile,
- id,
+ *id_loc,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) id);
+ (unsigned) *id_loc);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
- rcache->handle = id;
-
- ret = vmw_resource_relocation_add(&sw_context->res_relocations,
- res,
- id_loc - sw_context->buf_start);
- if (unlikely(ret != 0))
- goto out_no_reloc;
+ rcache->handle = *id_loc;
- ret = vmw_resource_val_add(sw_context, res, &node);
+ ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+ res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
rcache->node = node;
if (p_val)
*p_val = node;
-
- if (dev_priv->has_mob && node->first_usage &&
- res_type == vmw_res_context) {
- ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
- if (unlikely(ret != 0))
- goto out_no_reloc;
- node->staged_bindings =
- kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
- if (node->staged_bindings == NULL) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
- goto out_no_reloc;
- }
- INIT_LIST_HEAD(&node->staged_bindings->list);
- }
-
vmw_resource_unreference(&res);
return 0;
@@ -534,31 +576,6 @@ out_no_reloc:
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
- * on the resource validate list unless it's already there.
- *
- * @dev_priv: Pointer to a device private structure.
- * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
- * @converter: User-space visisble type specific information.
- * @id_loc: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated
- * on exit.
- */
-static int
-vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
-{
- return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
- converter, *id_loc, id_loc, p_val);
-}
-
-/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
@@ -572,8 +589,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
- if (likely(!val->staged_bindings))
- continue;
+ if (unlikely(!val->staged_bindings))
+ break;
ret = vmw_context_rebind_all(val->res);
if (unlikely(ret != 0)) {
@@ -1626,13 +1643,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
} *cmd;
int ret;
size_t size;
+ struct vmw_resource_val_node *val;
cmd = container_of(header, struct vmw_shader_define_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- NULL);
+ &val);
if (unlikely(ret != 0))
return ret;
@@ -1640,11 +1658,11 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return 0;
size = cmd->header.size - sizeof(cmd->body);
- ret = vmw_compat_shader_add(sw_context->fp->shman,
+ ret = vmw_compat_shader_add(dev_priv,
+ vmw_context_res_man(val->res),
cmd->body.shid, cmd + 1,
cmd->body.type, size,
- sw_context->fp->tfile,
- &sw_context->staged_shaders);
+ &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
@@ -1672,23 +1690,24 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
SVGA3dCmdDestroyShader body;
} *cmd;
int ret;
+ struct vmw_resource_val_node *val;
cmd = container_of(header, struct vmw_shader_destroy_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- NULL);
+ &val);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_compat_shader_remove(sw_context->fp->shman,
+ ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
cmd->body.shid,
cmd->body.type,
- &sw_context->staged_shaders);
+ &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
@@ -1715,7 +1734,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
- struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *ctx_node, *res_node = NULL;
+ struct vmw_ctx_bindinfo bi;
+ struct vmw_resource *res = NULL;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -1727,32 +1748,40 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
- struct vmw_resource_val_node *res_node;
- u32 shid = cmd->body.shid;
-
- if (shid != SVGA3D_INVALID_ID)
- (void) vmw_compat_shader_lookup(sw_context->fp->shman,
- cmd->body.type,
- &shid);
-
- ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
- vmw_res_shader,
- user_shader_converter,
- shid,
- &cmd->body.shid, &res_node);
+ if (!dev_priv->has_mob)
+ return 0;
+
+ if (cmd->body.shid != SVGA3D_INVALID_ID) {
+ res = vmw_compat_shader_lookup
+ (vmw_context_res_man(ctx_node->res),
+ cmd->body.shid,
+ cmd->body.type);
+
+ if (!IS_ERR(res)) {
+ ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
+ vmw_res_shader,
+ &cmd->body.shid, res,
+ &res_node);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ }
+
+ if (!res_node) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context,
+ vmw_res_shader,
+ user_shader_converter,
+ &cmd->body.shid, &res_node);
if (unlikely(ret != 0))
return ret;
-
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_shader;
- bi.i1.shader_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
- return 0;
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_shader;
+ bi.i1.shader_type = cmd->body.type;
+ return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
/**
@@ -2394,6 +2423,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
}
+
+
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
@@ -2453,7 +2484,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
sw_context->res_ht_initialized = true;
}
- INIT_LIST_HEAD(&sw_context->staged_shaders);
+ INIT_LIST_HEAD(&sw_context->staged_cmd_res);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
@@ -2548,8 +2579,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
list_splice_init(&sw_context->resource_list, &resource_list);
- vmw_compat_shaders_commit(sw_context->fp->shman,
- &sw_context->staged_shaders);
+ vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
@@ -2576,8 +2606,7 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
- vmw_compat_shaders_revert(sw_context->fp->shman,
- &sw_context->staged_shaders);
+ vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b1273e8e9a69..26f8bdde3529 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -47,6 +47,7 @@ struct vmwgfx_gmrid_man {
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct vmwgfx_gmrid_man *gman =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8f3edc4710f2..d2bc2b03d4c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,7 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
vmw_surface_unreference(&du->cursor_surface);
if (du->cursor_dmabuf)
vmw_dmabuf_unreference(&du->cursor_dmabuf);
- drm_sysfs_connector_remove(&du->connector);
+ drm_connector_unregister(&du->connector);
drm_crtc_cleanup(&du->crtc);
drm_encoder_cleanup(&du->encoder);
drm_connector_cleanup(&du->connector);
@@ -136,7 +136,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
@@ -343,7 +343,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
kmap_num = (64*64*4) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(bo, true, false, false, 0);
+ ret = ttm_bo_reserve(bo, true, false, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return;
@@ -1501,7 +1501,6 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
{
struct drm_vmw_cursor_bypass_arg *arg = data;
struct vmw_display_unit *du;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
@@ -1519,13 +1518,12 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
return 0;
}
- obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, arg->crtc_id);
+ if (!crtc) {
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
du = vmw_crtc_to_du(crtc);
du->hotspot_x = arg->xhot;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b2b9bd23aeee..15e185ae4c99 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -371,7 +371,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
- (void) drm_sysfs_connector_add(connector);
+ (void) drm_connector_register(connector);
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 01d68f0a69dc..a432c0db257c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -127,7 +127,7 @@ static void vmw_resource_release(struct kref *kref)
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
- ttm_bo_reserve(bo, false, false, false, 0);
+ ttm_bo_reserve(bo, false, false, false, NULL);
if (!list_empty(&res->mob_head) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index a95d3a0cabe4..b295463a60b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -467,7 +467,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
- (void) drm_sysfs_connector_add(connector);
+ (void) drm_connector_register(connector);
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index c1559eeaffe9..8719fb3cccc9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,8 +29,6 @@
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
-#define VMW_COMPAT_SHADER_HT_ORDER 12
-
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
@@ -42,49 +40,8 @@ struct vmw_user_shader {
struct vmw_shader shader;
};
-/**
- * enum vmw_compat_shader_state - Staging state for compat shaders
- */
-enum vmw_compat_shader_state {
- VMW_COMPAT_COMMITED,
- VMW_COMPAT_ADD,
- VMW_COMPAT_DEL
-};
-
-/**
- * struct vmw_compat_shader - Metadata for compat shaders.
- *
- * @handle: The TTM handle of the guest backed shader.
- * @tfile: The struct ttm_object_file the guest backed shader is registered
- * with.
- * @hash: Hash item for lookup.
- * @head: List head for staging lists or the compat shader manager list.
- * @state: Staging state.
- *
- * The structure is protected by the cmdbuf lock.
- */
-struct vmw_compat_shader {
- u32 handle;
- struct ttm_object_file *tfile;
- struct drm_hash_item hash;
- struct list_head head;
- enum vmw_compat_shader_state state;
-};
-
-/**
- * struct vmw_compat_shader_manager - Compat shader manager.
- *
- * @shaders: Hash table containing staged and commited compat shaders
- * @list: List of commited shaders.
- * @dev_priv: Pointer to a device private structure.
- *
- * @shaders and @list are protected by the cmdbuf mutex for now.
- */
-struct vmw_compat_shader_manager {
- struct drm_open_hash shaders;
- struct list_head list;
- struct vmw_private *dev_priv;
-};
+static uint64_t vmw_user_shader_size;
+static uint64_t vmw_shader_size;
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
@@ -98,8 +55,6 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
-static uint64_t vmw_user_shader_size;
-
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -347,6 +302,16 @@ static void vmw_user_shader_free(struct vmw_resource *res)
vmw_user_shader_size);
}
+static void vmw_shader_free(struct vmw_resource *res)
+{
+ struct vmw_shader *shader = vmw_res_to_shader(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ kfree(shader);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_shader_size);
+}
+
/**
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
@@ -371,13 +336,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
-static int vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
- size_t shader_size,
- size_t offset,
- SVGA3dShaderType shader_type,
- struct ttm_object_file *tfile,
- u32 *handle)
+static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
@@ -442,6 +407,56 @@ out:
}
+struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type)
+{
+ struct vmw_shader *shader;
+ struct vmw_resource *res;
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of shaders anyway.
+ */
+ if (unlikely(vmw_shader_size == 0))
+ vmw_shader_size =
+ ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_shader_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ goto out_err;
+ }
+
+ shader = kzalloc(sizeof(*shader), GFP_KERNEL);
+ if (unlikely(shader == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_shader_size);
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ res = &shader->res;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+ ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+ offset, shader_type, buffer,
+ vmw_shader_free);
+
+out_err:
+ return ret ? ERR_PTR(ret) : res;
+}
+
+
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -490,8 +505,8 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_bad_arg;
- ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
- shader_type, tfile, &arg->shader_handle);
+ ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+ shader_type, tfile, &arg->shader_handle);
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
@@ -500,202 +515,83 @@ out_bad_arg:
}
/**
- * vmw_compat_shader_lookup - Look up a compat shader
- *
- * @man: Pointer to the compat shader manager.
- * @shader_type: The shader type, that combined with the user_key identifies
- * the shader.
- * @user_key: On entry, this should be a pointer to the user_key.
- * On successful exit, it will contain the guest-backed shader's TTM handle.
+ * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * shader type are within valid bounds.
*
- * Returns 0 on success. Non-zero on failure, in which case the value pointed
- * to by @user_key is unmodified.
- */
-int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
- SVGA3dShaderType shader_type,
- u32 *user_key)
-{
- struct drm_hash_item *hash;
- int ret;
- unsigned long key = *user_key | (shader_type << 24);
-
- ret = drm_ht_find_item(&man->shaders, key, &hash);
- if (unlikely(ret != 0))
- return ret;
-
- *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
- hash)->handle;
-
- return 0;
-}
-
-/**
- * vmw_compat_shader_free - Free a compat shader.
- *
- * @man: Pointer to the compat shader manager.
- * @entry: Pointer to a struct vmw_compat_shader.
- *
- * Frees a struct vmw_compat_shder entry and drops its reference to the
- * guest backed shader.
- */
-static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
- struct vmw_compat_shader *entry)
-{
- list_del(&entry->head);
- WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
- WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
- TTM_REF_USAGE));
- kfree(entry);
-}
-
-/**
- * vmw_compat_shaders_commit - Commit a list of compat shader actions.
- *
- * @man: Pointer to the compat shader manager.
- * @list: Caller's list of compat shader actions.
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
*
- * This function commits a list of compat shader additions or removals.
- * It is typically called when the execbuf ioctl call triggering these
- * actions has commited the fifo contents to the device.
+ * Returns true if valid, false if not.
*/
-void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
- struct list_head *list)
+static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
- struct vmw_compat_shader *entry, *next;
-
- list_for_each_entry_safe(entry, next, list, head) {
- list_del(&entry->head);
- switch (entry->state) {
- case VMW_COMPAT_ADD:
- entry->state = VMW_COMPAT_COMMITED;
- list_add_tail(&entry->head, &man->list);
- break;
- case VMW_COMPAT_DEL:
- ttm_ref_object_base_unref(entry->tfile, entry->handle,
- TTM_REF_USAGE);
- kfree(entry);
- break;
- default:
- BUG();
- break;
- }
- }
+ return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}
/**
- * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
*
- * @man: Pointer to the compat shader manager.
- * @list: Caller's list of compat shader actions.
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
*
- * This function reverts a list of compat shader additions or removals.
- * It is typically called when the execbuf ioctl call triggering these
- * actions failed for some reason, and the command stream was never
- * submitted.
+ * Returns a hash key suitable for a command buffer managed resource
+ * manager hash table.
*/
-void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
- struct list_head *list)
+static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
- struct vmw_compat_shader *entry, *next;
- int ret;
-
- list_for_each_entry_safe(entry, next, list, head) {
- switch (entry->state) {
- case VMW_COMPAT_ADD:
- vmw_compat_shader_free(man, entry);
- break;
- case VMW_COMPAT_DEL:
- ret = drm_ht_insert_item(&man->shaders, &entry->hash);
- list_del(&entry->head);
- list_add_tail(&entry->head, &man->list);
- entry->state = VMW_COMPAT_COMMITED;
- break;
- default:
- BUG();
- break;
- }
- }
+ return user_key | (shader_type << 20);
}
/**
* vmw_compat_shader_remove - Stage a compat shader for removal.
*
- * @man: Pointer to the compat shader manager
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @shader_type: Shader type.
- * @list: Caller's list of staged shader actions.
- *
- * This function stages a compat shader for removal and removes the key from
- * the shader manager's hash table. If the shader was previously only staged
- * for addition it is completely removed (But the execbuf code may keep a
- * reference if it was bound to a context between addition and removal). If
- * it was previously commited to the manager, it is staged for removal.
+ * @list: Caller's list of staged command buffer resource actions.
*/
-int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type,
struct list_head *list)
{
- struct vmw_compat_shader *entry;
- struct drm_hash_item *hash;
- int ret;
-
- ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
- &hash);
- if (likely(ret != 0))
+ if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
- entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
-
- switch (entry->state) {
- case VMW_COMPAT_ADD:
- vmw_compat_shader_free(man, entry);
- break;
- case VMW_COMPAT_COMMITED:
- (void) drm_ht_remove_item(&man->shaders, &entry->hash);
- list_del(&entry->head);
- entry->state = VMW_COMPAT_DEL;
- list_add_tail(&entry->head, list);
- break;
- default:
- BUG();
- break;
- }
-
- return 0;
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
+ vmw_compat_shader_key(user_key,
+ shader_type),
+ list);
}
/**
- * vmw_compat_shader_add - Create a compat shader and add the
- * key to the manager
+ * vmw_compat_shader_add - Create a compat shader and stage it for addition
+ * as a command buffer managed resource.
*
- * @man: Pointer to the compat shader manager
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @bytecode: Pointer to the bytecode of the shader.
* @shader_type: Shader type.
* @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
* to be created with.
- * @list: Caller's list of staged shader actions.
+ * @list: Caller's list of staged command buffer resource actions.
*
- * Note that only the key is added to the shader manager's hash table.
- * The shader is not yet added to the shader manager's list of shaders.
*/
-int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+int vmw_compat_shader_add(struct vmw_private *dev_priv,
+ struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
- struct ttm_object_file *tfile,
struct list_head *list)
{
struct vmw_dma_buffer *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
- struct vmw_compat_shader *compat;
- u32 handle;
int ret;
+ struct vmw_resource *res;
- if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+ if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
/* Allocate and pin a DMA buffer */
@@ -703,7 +599,7 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
if (unlikely(buf == NULL))
return -ENOMEM;
- ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+ ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
true, vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto out;
@@ -728,84 +624,40 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base);
- /* Create a guest-backed shader container backed by the dma buffer */
- ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
- tfile, &handle);
- vmw_dmabuf_unreference(&buf);
+ res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
if (unlikely(ret != 0))
goto no_reserve;
- /*
- * Create a compat shader structure and stage it for insertion
- * in the manager
- */
- compat = kzalloc(sizeof(*compat), GFP_KERNEL);
- if (compat == NULL)
- goto no_compat;
-
- compat->hash.key = user_key | (shader_type << 24);
- ret = drm_ht_insert_item(&man->shaders, &compat->hash);
- if (unlikely(ret != 0))
- goto out_invalid_key;
-
- compat->state = VMW_COMPAT_ADD;
- compat->handle = handle;
- compat->tfile = tfile;
- list_add_tail(&compat->head, list);
-
- return 0;
-out_invalid_key:
- kfree(compat);
-no_compat:
- ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
+ vmw_compat_shader_key(user_key, shader_type),
+ res, list);
+ vmw_resource_unreference(&res);
no_reserve:
+ vmw_dmabuf_unreference(&buf);
out:
return ret;
}
/**
- * vmw_compat_shader_man_create - Create a compat shader manager
- *
- * @dev_priv: Pointer to a device private structure.
- *
- * Typically done at file open time. If successful returns a pointer to a
- * compat shader manager. Otherwise returns an error pointer.
- */
-struct vmw_compat_shader_manager *
-vmw_compat_shader_man_create(struct vmw_private *dev_priv)
-{
- struct vmw_compat_shader_manager *man;
- int ret;
-
- man = kzalloc(sizeof(*man), GFP_KERNEL);
- if (man == NULL)
- return ERR_PTR(-ENOMEM);
-
- man->dev_priv = dev_priv;
- INIT_LIST_HEAD(&man->list);
- ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
- if (ret == 0)
- return man;
-
- kfree(man);
- return ERR_PTR(ret);
-}
-
-/**
- * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ * vmw_compat_shader_lookup - Look up a compat shader
*
- * @man: Pointer to the shader manager to destroy.
+ * @man: Pointer to the command buffer managed resource manager identifying
+ * the shader namespace.
+ * @user_key: The user space id of the shader.
+ * @shader_type: The shader type.
*
- * Typically done at file close time.
+ * Returns a refcounted pointer to a struct vmw_resource if the shader was
+ * found. An error pointer otherwise.
*/
-void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+struct vmw_resource *
+vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type)
{
- struct vmw_compat_shader *entry, *next;
-
- mutex_lock(&man->dev_priv->cmdbuf_mutex);
- list_for_each_entry_safe(entry, next, &man->list, head)
- vmw_compat_shader_free(man, entry);
+ if (!vmw_compat_shader_id_ok(user_key, shader_type))
+ return ERR_PTR(-EINVAL);
- mutex_unlock(&man->dev_priv->cmdbuf_mutex);
- kfree(man);
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
+ vmw_compat_shader_key(user_key,
+ shader_type));
}
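The compat shader namespace now reduces to packing the user key and the shader type into a single command buffer resource key, with vmw_compat_shader_id_ok() keeping the user key within 20 bits and the type within 4 bits so the two fields cannot overlap. A worked example, illustrative only: both helpers are static, so this would have to live inside vmwgfx_shader.c, and SVGA3D_SHADERTYPE_PS == 2 is assumed, as in the SVGA3D headers.

    static int example_compat_key(void)
    {
            u32 user_key = 0x12;
            SVGA3dShaderType type = SVGA3D_SHADERTYPE_PS;   /* assumed == 2 */
            u32 key;

            if (!vmw_compat_shader_id_ok(user_key, type))   /* 0x12 < 2^20, 2 < 16 */
                    return -EINVAL;

            key = vmw_compat_shader_key(user_key, type);    /* 0x12 | (2 << 20) == 0x200012 */
            return key == 0x200012 ? 0 : -EINVAL;
    }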
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 112f27e51bc7..63bd63f3c7df 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -185,16 +185,16 @@ static unsigned int pin_job(struct host1x_job *job)
struct sg_table *sgt;
dma_addr_t phys_addr;
- reloc->target = host1x_bo_get(reloc->target);
- if (!reloc->target)
+ reloc->target.bo = host1x_bo_get(reloc->target.bo);
+ if (!reloc->target.bo)
goto unpin;
- phys_addr = host1x_bo_pin(reloc->target, &sgt);
+ phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
if (!phys_addr)
goto unpin;
job->addr_phys[job->num_unpins] = phys_addr;
- job->unpins[job->num_unpins].bo = reloc->target;
+ job->unpins[job->num_unpins].bo = reloc->target.bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
}
@@ -235,21 +235,21 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocarray[i];
u32 reloc_addr = (job->reloc_addr_phys[i] +
- reloc->target_offset) >> reloc->shift;
+ reloc->target.offset) >> reloc->shift;
u32 *target;
/* skip all other gathers */
- if (cmdbuf != reloc->cmdbuf)
+ if (cmdbuf != reloc->cmdbuf.bo)
continue;
- if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
+ if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
if (cmdbuf_page_addr)
host1x_bo_kunmap(cmdbuf, last_page,
cmdbuf_page_addr);
cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
- reloc->cmdbuf_offset >> PAGE_SHIFT);
- last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
+ reloc->cmdbuf.offset >> PAGE_SHIFT);
+ last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;
if (unlikely(!cmdbuf_page_addr)) {
pr_err("Could not map cmdbuf for relocation\n");
@@ -257,7 +257,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
}
}
- target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
+ target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
*target = reloc_addr;
}
@@ -272,7 +272,7 @@ static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
{
offset *= sizeof(u32);
- if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
+ if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
return false;
return true;
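The host1x hunks above follow a rework of struct host1x_reloc (in include/linux/host1x.h, not shown in this diff) that groups each buffer object with its offset, so reloc->cmdbuf and reloc->target become bo/offset pairs instead of separate scalar fields. The shape assumed by these hunks is roughly the sketch below; the real definition may differ in detail:

    struct host1x_bo;   /* opaque buffer object, defined elsewhere */

    /* Assumed layout after the rework (illustration only). */
    struct host1x_reloc {
            struct {
                    struct host1x_bo *bo;
                    unsigned long offset;
            } cmdbuf;                 /* gather that contains the slot to patch */
            struct {
                    struct host1x_bo *bo;
                    unsigned long offset;
            } target;                 /* buffer whose address is written there */
            unsigned long shift;
    };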
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index af0259708358..d2077f040f3e 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -237,12 +237,10 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
if (conflict->locks & lwants)
return conflict;
- /* Ok, now check if he owns the resource we want. We don't need
- * to check "decodes" since it should be impossible to own
- * own legacy resources you don't decode unless I have a bug
- * in this code...
+ /* Ok, now check if it owns the resource we want. We can
+ * lock resources that are not decoded, therefore a device
+ * can own resources it doesn't decode.
*/
- WARN_ON(conflict->owns & ~conflict->decodes);
match = lwants & conflict->owns;
if (!match)
continue;
@@ -254,13 +252,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
flags = 0;
pci_bits = 0;
+ /* If we can't control legacy resources via the bridge, we
+ * also need to disable normal decoding.
+ */
if (!conflict->bridge_has_one_vga) {
- vga_irq_set_state(conflict, false);
- flags |= PCI_VGA_STATE_CHANGE_DECODES;
- if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM)
pci_bits |= PCI_COMMAND_MEMORY;
- if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO)
pci_bits |= PCI_COMMAND_IO;
+
+ if (pci_bits) {
+ vga_irq_set_state(conflict, false);
+ flags |= PCI_VGA_STATE_CHANGE_DECODES;
+ }
}
if (change_bridge)
@@ -268,18 +272,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
conflict->owns &= ~match;
- /* If he also owned non-legacy, that is no longer the case */
- if (match & VGA_RSRC_LEGACY_MEM)
+
+ /* If we disabled normal decoding, reflect it in owns */
+ if (pci_bits & PCI_COMMAND_MEMORY)
conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
- if (match & VGA_RSRC_LEGACY_IO)
+ if (pci_bits & PCI_COMMAND_IO)
conflict->owns &= ~VGA_RSRC_NORMAL_IO;
}
enable_them:
/* ok dude, we got it, everybody conflicting has been disabled, let's
- * enable us. Make sure we don't mark a bit in "owns" that we don't
- * also have in "decodes". We can lock resources we don't decode but
- * not own them.
+ * enable us. Mark any bits in "owns" regardless of whether we
+ * decoded them. We can lock resources we don't decode, therefore
+ * we must track them via "owns".
*/
flags = 0;
pci_bits = 0;
@@ -291,7 +296,7 @@ enable_them:
if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
pci_bits |= PCI_COMMAND_IO;
}
- if (!!(wants & VGA_RSRC_LEGACY_MASK))
+ if (wants & VGA_RSRC_LEGACY_MASK)
flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
@@ -299,7 +304,7 @@ enable_them:
if (!vgadev->bridge_has_one_vga) {
vga_irq_set_state(vgadev, true);
}
- vgadev->owns |= (wants & vgadev->decodes);
+ vgadev->owns |= wants;
lock_them:
vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
if (rsrc & VGA_RSRC_LEGACY_IO)
@@ -649,7 +654,6 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
old_decodes = vgadev->decodes;
decodes_removed = ~new_decodes & old_decodes;
decodes_unlocked = vgadev->locks & decodes_removed;
- vgadev->owns &= ~decodes_removed;
vgadev->decodes = new_decodes;
pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 5e79c6ad914f..c18d5d71062d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -261,6 +261,20 @@ config HOLTEK_FF
Say Y here if you have a Holtek On Line Grip based game controller
and want to have force feedback support for it.
+config HID_GT683R
+ tristate "MSI GT68xR LED support"
+ depends on LEDS_CLASS && USB_HID
+ ---help---
+ Say Y here if you want to enable support for the three MSI GT68xR LEDs
+
+ This driver supports the following modes:
+ - Normal: LEDs are fully on when enabled
+ - Audio: LEDs brightness depends on sound level
+ - Breathing: LEDs brightness varies at human breathing rate
+
+ Currently the following devices are known to be supported:
+ - MSI GT683R
+
config HID_HUION
tristate "Huion tablets"
depends on USB_HID
@@ -331,18 +345,20 @@ config HID_LCPOWER
---help---
Support for LC-Power RC1000MCE RF remote control.
-config HID_LENOVO_TPKBD
- tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
+config HID_LENOVO
+ tristate "Lenovo / Thinkpad devices"
depends on HID
select NEW_LEDS
select LEDS_CLASS
---help---
- Support for the Lenovo ThinkPad USB Keyboard with TrackPoint.
+ Support for Lenovo devices that are not fully compliant with the HID standard.
- Say Y here if you have a Lenovo ThinkPad USB Keyboard with TrackPoint
- and would like to use device-specific features like changing the
- sensitivity of the trackpoint, using the microphone mute button or
- controlling the mute and microphone mute LEDs.
+ Say Y if you want support for the non-compliant features of the Lenovo
+ Thinkpad standalone keyboards, e.g.:
+ - ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint
+ configuration)
+ - ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys)
+ - ThinkPad Compact USB Keyboard with TrackPoint (supports Fn keys)
config HID_LOGITECH
tristate "Logitech devices" if EXPERT
@@ -748,12 +764,17 @@ config THRUSTMASTER_FF
Rumble Force or Force Feedback Wheel.
config HID_WACOM
- tristate "Wacom Bluetooth devices support"
+ tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on HID
- depends on LEDS_CLASS
select POWER_SUPPLY
- ---help---
- Support for Wacom Graphire Bluetooth and Intuos4 WL tablets.
+ select NEW_LEDS
+ select LEDS_CLASS
+ help
+ Say Y here if you want to use the USB or BT version of the Wacom Intuos
+ or Graphire tablet.
+
+ To compile this driver as a module, choose M here: the
+ module will be called wacom.
config HID_WIIMOTE
tristate "Nintendo Wii / Wii U peripherals"
@@ -785,7 +806,7 @@ config HID_XINMO
depends on HID
---help---
Support for Xin-Mo devices that are not fully compliant with the HID
- standard. Currently only supports the Xin-Mo Dual Arcade. Say Y here
+ standard. Currently only supports the Xin-Mo Dual Arcade. Say Y here
if you have a Xin-Mo Dual Arcade controller.
config HID_ZEROPLUS
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index a6fa6baf368e..4dbac7f8530c 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_ELO) += hid-elo.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
+obj-$(CONFIG_HID_GT683R) += hid-gt683r.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtek-mouse.o
@@ -59,7 +60,7 @@ obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
-obj-$(CONFIG_HID_LENOVO_TPKBD) += hid-lenovo-tpkbd.o
+obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
@@ -115,7 +116,9 @@ obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
-obj-$(CONFIG_HID_WACOM) += hid-wacom.o
+
+wacom-objs := wacom_wac.o wacom_sys.o
+obj-$(CONFIG_HID_WACOM) += wacom.o
obj-$(CONFIG_HID_WALTOP) += hid-waltop.o
obj-$(CONFIG_HID_WIIMOTE) += hid-wiimote.o
obj-$(CONFIG_HID_SENSOR_HUB) += hid-sensor-hub.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8ed66fd1ea87..12b6e67d9de0 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -783,10 +783,21 @@ static int hid_scan_report(struct hid_device *hid)
* Vendor specific handlings
*/
if ((hid->vendor == USB_VENDOR_ID_SYNAPTICS) &&
- (hid->group == HID_GROUP_GENERIC))
+ (hid->group == HID_GROUP_GENERIC) &&
+ /* only bind to the mouse interface of composite USB devices */
+ (hid->bus != BUS_USB || hid->type == HID_TYPE_USBMOUSE))
/* hid-rmi should take care of them, not hid-generic */
hid->group = HID_GROUP_RMI;
+ /*
+ * Vendor specific handling
+ */
+ switch (hid->vendor) {
+ case USB_VENDOR_ID_WACOM:
+ hid->group = HID_GROUP_WACOM;
+ break;
+ }
+
vfree(parser);
return 0;
}
@@ -1782,7 +1793,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
- { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
@@ -1796,8 +1807,10 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
-#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD)
+#if IS_ENABLED(CONFIG_HID_LENOVO)
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
@@ -1845,6 +1858,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
@@ -1933,8 +1947,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
@@ -2266,6 +2278,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_410) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_SPEAK_510) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_JABRA, USB_DEVICE_ID_JABRA_GN9350E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
@@ -2339,7 +2352,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_CYCLOPS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LCSPEC) },
- { HID_USB_DEVICE(USB_VENDOR_ID_WACOM, HID_ANY_ID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_4_PHIDGETSERVO_20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) },
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 56be85a9a77c..a822db5a8338 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -240,8 +240,6 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
u8 buf[5];
int ret;
- cp2112_gpio_set(chip, offset, value);
-
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
sizeof(buf), HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
@@ -260,6 +258,12 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
return ret;
}
+ /*
+ * Set gpio value when output direction is already set,
+ * as specified in AN495, Rev. 0.2, chapter 4.4
+ */
+ cp2112_gpio_set(chip, offset, value);
+
return 0;
}
@@ -425,6 +429,105 @@ static int cp2112_write_req(void *buf, u8 slave_address, u8 command, u8 *data,
return data_length + 4;
}
+static int cp2112_i2c_write_req(void *buf, u8 slave_address, u8 *data,
+ u8 data_length)
+{
+ struct cp2112_write_req_report *report = buf;
+
+ if (data_length > sizeof(report->data))
+ return -EINVAL;
+
+ report->report = CP2112_DATA_WRITE_REQUEST;
+ report->slave_address = slave_address << 1;
+ report->length = data_length;
+ memcpy(report->data, data, data_length);
+ return data_length + 3;
+}
+
+static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data;
+ struct hid_device *hdev = dev->hdev;
+ u8 buf[64];
+ ssize_t count;
+ unsigned int retries;
+ int ret;
+
+ hid_dbg(hdev, "I2C %d messages\n", num);
+
+ if (num != 1) {
+ hid_err(hdev,
+ "Multi-message I2C transactions not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (msgs->flags & I2C_M_RD)
+ count = cp2112_read_req(buf, msgs->addr, msgs->len);
+ else
+ count = cp2112_i2c_write_req(buf, msgs->addr, msgs->buf,
+ msgs->len);
+
+ if (count < 0)
+ return count;
+
+ ret = hid_hw_power(hdev, PM_HINT_FULLON);
+ if (ret < 0) {
+ hid_err(hdev, "power management error: %d\n", ret);
+ return ret;
+ }
+
+ ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT);
+ if (ret < 0) {
+ hid_warn(hdev, "Error starting transaction: %d\n", ret);
+ goto power_normal;
+ }
+
+ for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) {
+ ret = cp2112_xfer_status(dev);
+ if (-EBUSY == ret)
+ continue;
+ if (ret < 0)
+ goto power_normal;
+ break;
+ }
+
+ if (XFER_STATUS_RETRIES <= retries) {
+ hid_warn(hdev, "Transfer timed out, cancelling.\n");
+ buf[0] = CP2112_CANCEL_TRANSFER;
+ buf[1] = 0x01;
+
+ ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT);
+ if (ret < 0)
+ hid_warn(hdev, "Error cancelling transaction: %d\n",
+ ret);
+
+ ret = -ETIMEDOUT;
+ goto power_normal;
+ }
+
+ if (!(msgs->flags & I2C_M_RD))
+ goto finish;
+
+ ret = cp2112_read(dev, msgs->buf, msgs->len);
+ if (ret < 0)
+ goto power_normal;
+ if (ret != msgs->len) {
+ hid_warn(hdev, "short read: %d < %d\n", ret, msgs->len);
+ ret = -EIO;
+ goto power_normal;
+ }
+
+finish:
+ /* return the number of transferred messages */
+ ret = 1;
+
+power_normal:
+ hid_hw_power(hdev, PM_HINT_NORMAL);
+ hid_dbg(hdev, "I2C transfer finished: %d\n", ret);
+ return ret;
+}
+
static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write, u8 command,
int size, union i2c_smbus_data *data)
@@ -591,7 +694,8 @@ power_normal:
static u32 cp2112_functionality(struct i2c_adapter *adap)
{
- return I2C_FUNC_SMBUS_BYTE |
+ return I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_BYTE |
I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA |
I2C_FUNC_SMBUS_BLOCK_DATA |
@@ -601,6 +705,7 @@ static u32 cp2112_functionality(struct i2c_adapter *adap)
}
static const struct i2c_algorithm smbus_algorithm = {
+ .master_xfer = cp2112_i2c_xfer,
.smbus_xfer = cp2112_xfer,
.functionality = cp2112_functionality,
};
diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c
new file mode 100644
index 000000000000..0d6f135e266c
--- /dev/null
+++ b/drivers/hid/hid-gt683r.c
@@ -0,0 +1,321 @@
+/*
+ * MSI GT683R led driver
+ *
+ * Copyright (c) 2014 Janne Kanniainen <janne.kanniainen@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+#define GT683R_BUFFER_SIZE 8
+
+/*
+ * GT683R_LED_OFF: all LEDs are off
+ * GT683R_LED_AUDIO: LEDs brightness depends on sound level
+ * GT683R_LED_BREATHING: LEDs brightness varies at human breathing rate
+ * GT683R_LED_NORMAL: LEDs are fully on when enabled
+ */
+enum gt683r_led_mode {
+ GT683R_LED_OFF = 0,
+ GT683R_LED_AUDIO = 2,
+ GT683R_LED_BREATHING = 3,
+ GT683R_LED_NORMAL = 5
+};
+
+enum gt683r_panels {
+ GT683R_LED_BACK = 0,
+ GT683R_LED_SIDE = 1,
+ GT683R_LED_FRONT = 2,
+ GT683R_LED_COUNT,
+};
+
+static const char * const gt683r_panel_names[] = {
+ "back",
+ "side",
+ "front",
+};
+
+struct gt683r_led {
+ struct hid_device *hdev;
+ struct led_classdev led_devs[GT683R_LED_COUNT];
+ struct mutex lock;
+ struct work_struct work;
+ enum led_brightness brightnesses[GT683R_LED_COUNT];
+ enum gt683r_led_mode mode;
+};
+
+static const struct hid_device_id gt683r_led_id[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
+ { }
+};
+
+static void gt683r_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ int i;
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct gt683r_led *led = hid_get_drvdata(hdev);
+
+ for (i = 0; i < GT683R_LED_COUNT; i++) {
+ if (led_cdev == &led->led_devs[i])
+ break;
+ }
+
+ if (i < GT683R_LED_COUNT) {
+ led->brightnesses[i] = brightness;
+ schedule_work(&led->work);
+ }
+}
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u8 sysfs_mode;
+ struct hid_device *hdev = container_of(dev->parent,
+ struct hid_device, dev);
+ struct gt683r_led *led = hid_get_drvdata(hdev);
+
+ if (led->mode == GT683R_LED_NORMAL)
+ sysfs_mode = 0;
+ else if (led->mode == GT683R_LED_AUDIO)
+ sysfs_mode = 1;
+ else
+ sysfs_mode = 2;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sysfs_mode);
+}
+
+static ssize_t mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u8 sysfs_mode;
+ struct hid_device *hdev = container_of(dev->parent,
+ struct hid_device, dev);
+ struct gt683r_led *led = hid_get_drvdata(hdev);
+
+
+ if (kstrtou8(buf, 10, &sysfs_mode) || sysfs_mode > 2)
+ return -EINVAL;
+
+ mutex_lock(&led->lock);
+
+ if (sysfs_mode == 0)
+ led->mode = GT683R_LED_NORMAL;
+ else if (sysfs_mode == 1)
+ led->mode = GT683R_LED_AUDIO;
+ else
+ led->mode = GT683R_LED_BREATHING;
+
+ mutex_unlock(&led->lock);
+ schedule_work(&led->work);
+
+ return count;
+}
+
+static int gt683r_led_snd_msg(struct gt683r_led *led, u8 *msg)
+{
+ int ret;
+
+ ret = hid_hw_raw_request(led->hdev, msg[0], msg, GT683R_BUFFER_SIZE,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret != GT683R_BUFFER_SIZE) {
+ hid_err(led->hdev,
+ "failed to send set report request: %i\n", ret);
+ if (ret < 0)
+ return ret;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int gt683r_leds_set(struct gt683r_led *led, u8 leds)
+{
+ int ret;
+ u8 *buffer;
+
+ buffer = kzalloc(GT683R_BUFFER_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ buffer[0] = 0x01;
+ buffer[1] = 0x02;
+ buffer[2] = 0x30;
+ buffer[3] = leds;
+ ret = gt683r_led_snd_msg(led, buffer);
+
+ kfree(buffer);
+ return ret;
+}
+
+static int gt683r_mode_set(struct gt683r_led *led, u8 mode)
+{
+ int ret;
+ u8 *buffer;
+
+ buffer = kzalloc(GT683R_BUFFER_SIZE, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ buffer[0] = 0x01;
+ buffer[1] = 0x02;
+ buffer[2] = 0x20;
+ buffer[3] = mode;
+ buffer[4] = 0x01;
+ ret = gt683r_led_snd_msg(led, buffer);
+
+ kfree(buffer);
+ return ret;
+}
+
+static void gt683r_led_work(struct work_struct *work)
+{
+ int i;
+ u8 leds = 0;
+ u8 mode;
+ struct gt683r_led *led = container_of(work, struct gt683r_led, work);
+
+ mutex_lock(&led->lock);
+
+ for (i = 0; i < GT683R_LED_COUNT; i++) {
+ if (led->brightnesses[i])
+ leds |= BIT(i);
+ }
+
+ if (gt683r_leds_set(led, leds))
+ goto fail;
+
+ if (leds)
+ mode = led->mode;
+ else
+ mode = GT683R_LED_OFF;
+
+ gt683r_mode_set(led, mode);
+fail:
+ mutex_unlock(&led->lock);
+}
+
+static DEVICE_ATTR_RW(mode);
+
+static struct attribute *gt683r_led_attrs[] = {
+ &dev_attr_mode.attr,
+ NULL
+};
+
+static const struct attribute_group gt683r_led_group = {
+ .name = "gt683r",
+ .attrs = gt683r_led_attrs,
+};
+
+static const struct attribute_group *gt683r_led_groups[] = {
+ &gt683r_led_group,
+ NULL
+};
+
+static int gt683r_led_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int i;
+ int ret;
+ int name_sz;
+ char *name;
+ struct gt683r_led *led;
+
+ led = devm_kzalloc(&hdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ mutex_init(&led->lock);
+ INIT_WORK(&led->work, gt683r_led_work);
+
+ led->mode = GT683R_LED_NORMAL;
+ led->hdev = hdev;
+ hid_set_drvdata(hdev, led);
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid parsing failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ for (i = 0; i < GT683R_LED_COUNT; i++) {
+ name_sz = strlen(dev_name(&hdev->dev)) +
+ strlen(gt683r_panel_names[i]) + 3;
+
+ name = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ snprintf(name, name_sz, "%s::%s",
+ dev_name(&hdev->dev), gt683r_panel_names[i]);
+ led->led_devs[i].name = name;
+ led->led_devs[i].max_brightness = 1;
+ led->led_devs[i].brightness_set = gt683r_brightness_set;
+ led->led_devs[i].groups = gt683r_led_groups;
+
+ ret = led_classdev_register(&hdev->dev, &led->led_devs[i]);
+ if (ret) {
+ hid_err(hdev, "could not register led device\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ for (i = i - 1; i >= 0; i--)
+ led_classdev_unregister(&led->led_devs[i]);
+ hid_hw_stop(hdev);
+ return ret;
+}
+
+static void gt683r_led_remove(struct hid_device *hdev)
+{
+ int i;
+ struct gt683r_led *led = hid_get_drvdata(hdev);
+
+ for (i = 0; i < GT683R_LED_COUNT; i++)
+ led_classdev_unregister(&led->led_devs[i]);
+ flush_work(&led->work);
+ hid_hw_stop(hdev);
+}
+
+static struct hid_driver gt683r_led_driver = {
+ .probe = gt683r_led_probe,
+ .remove = gt683r_led_remove,
+ .name = "gt683r_led",
+ .id_table = gt683r_led_id,
+};
+
+module_hid_driver(gt683r_led_driver);
+
+MODULE_AUTHOR("Janne Kanniainen");
+MODULE_DESCRIPTION("MSI GT683R led driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-huion.c b/drivers/hid/hid-huion.c
index cbf4da4689ba..60f44cd1b0ed 100644
--- a/drivers/hid/hid-huion.c
+++ b/drivers/hid/hid-huion.c
@@ -2,6 +2,7 @@
* HID driver for Huion devices not fully compliant with HID standard
*
* Copyright (c) 2013 Martin Rusko
+ * Copyright (c) 2014 Nikolai Kondrashov
*/
/*
@@ -15,67 +16,89 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/usb.h>
+#include <asm/unaligned.h>
#include "usbhid/usbhid.h"
#include "hid-ids.h"
-/* Original Huion 580 report descriptor size */
-#define HUION_580_RDESC_ORIG_SIZE 177
-
-/* Fixed Huion 580 report descriptor */
-static __u8 huion_580_rdesc_fixed[] = {
- 0x05, 0x0D, /* Usage Page (Digitizer), */
- 0x09, 0x02, /* Usage (Pen), */
- 0xA1, 0x01, /* Collection (Application), */
- 0x85, 0x07, /* Report ID (7), */
- 0x09, 0x20, /* Usage (Stylus), */
- 0xA0, /* Collection (Physical), */
- 0x14, /* Logical Minimum (0), */
- 0x25, 0x01, /* Logical Maximum (1), */
- 0x75, 0x01, /* Report Size (1), */
- 0x09, 0x42, /* Usage (Tip Switch), */
- 0x09, 0x44, /* Usage (Barrel Switch), */
- 0x09, 0x46, /* Usage (Tablet Pick), */
- 0x95, 0x03, /* Report Count (3), */
- 0x81, 0x02, /* Input (Variable), */
- 0x95, 0x03, /* Report Count (3), */
- 0x81, 0x03, /* Input (Constant, Variable), */
- 0x09, 0x32, /* Usage (In Range), */
- 0x95, 0x01, /* Report Count (1), */
- 0x81, 0x02, /* Input (Variable), */
- 0x95, 0x01, /* Report Count (1), */
- 0x81, 0x03, /* Input (Constant, Variable), */
- 0x75, 0x10, /* Report Size (16), */
- 0x95, 0x01, /* Report Count (1), */
- 0xA4, /* Push, */
- 0x05, 0x01, /* Usage Page (Desktop), */
- 0x65, 0x13, /* Unit (Inch), */
- 0x55, 0xFD, /* Unit Exponent (-3), */
- 0x34, /* Physical Minimum (0), */
- 0x09, 0x30, /* Usage (X), */
- 0x46, 0x40, 0x1F, /* Physical Maximum (8000), */
- 0x26, 0x00, 0x7D, /* Logical Maximum (32000), */
- 0x81, 0x02, /* Input (Variable), */
- 0x09, 0x31, /* Usage (Y), */
- 0x46, 0x88, 0x13, /* Physical Maximum (5000), */
- 0x26, 0x20, 0x4E, /* Logical Maximum (20000), */
- 0x81, 0x02, /* Input (Variable), */
- 0xB4, /* Pop, */
- 0x09, 0x30, /* Usage (Tip Pressure), */
- 0x26, 0xFF, 0x07, /* Logical Maximum (2047), */
- 0x81, 0x02, /* Input (Variable), */
- 0xC0, /* End Collection, */
- 0xC0 /* End Collection */
+/* Report descriptor template placeholder head */
+#define HUION_PH_HEAD 0xFE, 0xED, 0x1D
+
+/* Report descriptor template placeholder IDs */
+enum huion_ph_id {
+ HUION_PH_ID_X_LM,
+ HUION_PH_ID_X_PM,
+ HUION_PH_ID_Y_LM,
+ HUION_PH_ID_Y_PM,
+ HUION_PH_ID_PRESSURE_LM,
+ HUION_PH_ID_NUM
+};
+
+/* Report descriptor template placeholder */
+#define HUION_PH(_ID) HUION_PH_HEAD, HUION_PH_ID_##_ID
+
+/* Fixed report descriptor template */
+static const __u8 huion_tablet_rdesc_template[] = {
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x02, /* Usage (Pen), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x07, /* Report ID (7), */
+ 0x09, 0x20, /* Usage (Stylus), */
+ 0xA0, /* Collection (Physical), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x09, 0x44, /* Usage (Barrel Switch), */
+ 0x09, 0x46, /* Usage (Tablet Pick), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x09, 0x32, /* Usage (In Range), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xA4, /* Push, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x65, 0x13, /* Unit (Inch), */
+ 0x55, 0xFD, /* Unit Exponent (-3), */
+ 0x34, /* Physical Minimum (0), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x27, HUION_PH(X_LM), /* Logical Maximum (PLACEHOLDER), */
+ 0x47, HUION_PH(X_PM), /* Physical Maximum (PLACEHOLDER), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x27, HUION_PH(Y_LM), /* Logical Maximum (PLACEHOLDER), */
+ 0x47, HUION_PH(Y_PM), /* Physical Maximum (PLACEHOLDER), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xB4, /* Pop, */
+ 0x09, 0x30, /* Usage (Tip Pressure), */
+ 0x27,
+ HUION_PH(PRESSURE_LM), /* Logical Maximum (PLACEHOLDER), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+};
+
+/* Driver data */
+struct huion_drvdata {
+ __u8 *rdesc;
+ unsigned int rsize;
};
static __u8 *huion_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
+ struct huion_drvdata *drvdata = hid_get_drvdata(hdev);
switch (hdev->product) {
- case USB_DEVICE_ID_HUION_580:
- if (*rsize == HUION_580_RDESC_ORIG_SIZE) {
- rdesc = huion_580_rdesc_fixed;
- *rsize = sizeof(huion_580_rdesc_fixed);
+ case USB_DEVICE_ID_HUION_TABLET:
+ if (drvdata->rdesc != NULL) {
+ rdesc = drvdata->rdesc;
+ *rsize = drvdata->rsize;
}
break;
}
@@ -83,82 +106,144 @@ static __u8 *huion_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
/**
- * Enable fully-functional tablet mode by reading special string
- * descriptor.
+ * Enable fully-functional tablet mode and determine device parameters.
*
* @hdev: HID device
- *
- * The specific string descriptor and data were discovered by sniffing
- * the Windows driver traffic.
*/
static int huion_tablet_enable(struct hid_device *hdev)
{
int rc;
- char buf[22];
+ struct usb_device *usb_dev = hid_to_usb_dev(hdev);
+ struct huion_drvdata *drvdata = hid_get_drvdata(hdev);
+ __le16 buf[6];
- rc = usb_string(hid_to_usb_dev(hdev), 0x64, buf, sizeof(buf));
- if (rc < 0)
- return rc;
+ /*
+ * Read string descriptor containing tablet parameters. The specific
+ * string descriptor and data were discovered by sniffing the Windows
+ * driver traffic.
+ * NOTE: This enables fully-functional tablet mode.
+ */
+ rc = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+ (USB_DT_STRING << 8) + 0x64,
+ 0x0409, buf, sizeof(buf),
+ USB_CTRL_GET_TIMEOUT);
+ if (rc == -EPIPE)
+ hid_warn(hdev, "device parameters not found\n");
+ else if (rc < 0)
+ hid_warn(hdev, "failed to get device parameters: %d\n", rc);
+ else if (rc != sizeof(buf))
+ hid_warn(hdev, "invalid device parameters\n");
+ else {
+ s32 params[HUION_PH_ID_NUM];
+ s32 resolution;
+ __u8 *p;
+ s32 v;
+
+ /* Extract device parameters */
+ params[HUION_PH_ID_X_LM] = le16_to_cpu(buf[1]);
+ params[HUION_PH_ID_Y_LM] = le16_to_cpu(buf[2]);
+ params[HUION_PH_ID_PRESSURE_LM] = le16_to_cpu(buf[4]);
+ resolution = le16_to_cpu(buf[5]);
+ if (resolution == 0) {
+ params[HUION_PH_ID_X_PM] = 0;
+ params[HUION_PH_ID_Y_PM] = 0;
+ } else {
+ params[HUION_PH_ID_X_PM] = params[HUION_PH_ID_X_LM] *
+ 1000 / resolution;
+ params[HUION_PH_ID_Y_PM] = params[HUION_PH_ID_Y_LM] *
+ 1000 / resolution;
+ }
+
+ /* Allocate fixed report descriptor */
+ drvdata->rdesc = devm_kmalloc(&hdev->dev,
+ sizeof(huion_tablet_rdesc_template),
+ GFP_KERNEL);
+ if (drvdata->rdesc == NULL) {
+ hid_err(hdev, "failed to allocate fixed rdesc\n");
+ return -ENOMEM;
+ }
+ drvdata->rsize = sizeof(huion_tablet_rdesc_template);
+
+ /* Format fixed report descriptor */
+ memcpy(drvdata->rdesc, huion_tablet_rdesc_template,
+ drvdata->rsize);
+ for (p = drvdata->rdesc;
+ p <= drvdata->rdesc + drvdata->rsize - 4;) {
+ if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
+ p[3] < sizeof(params)) {
+ v = params[p[3]];
+ put_unaligned(cpu_to_le32(v), (s32 *)p);
+ p += 4;
+ } else {
+ p++;
+ }
+ }
+ }
return 0;
}
static int huion_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
- int ret;
+ int rc;
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct huion_drvdata *drvdata;
+
+ /* Allocate and assign driver data */
+ drvdata = devm_kzalloc(&hdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (drvdata == NULL) {
+ hid_err(hdev, "failed to allocate driver data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, drvdata);
- /* Ignore interfaces 1 (mouse) and 2 (keyboard) for Huion 580 tablet,
- * as they are not used
- */
switch (id->product) {
- case USB_DEVICE_ID_HUION_580:
- if (intf->cur_altsetting->desc.bInterfaceNumber != 0x00)
- return -ENODEV;
+ case USB_DEVICE_ID_HUION_TABLET:
+ /* If this is the pen interface */
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 0) {
+ rc = huion_tablet_enable(hdev);
+ if (rc) {
+ hid_err(hdev, "tablet enabling failed\n");
+ return rc;
+ }
+ }
break;
}
- ret = hid_parse(hdev);
- if (ret) {
+ rc = hid_parse(hdev);
+ if (rc) {
hid_err(hdev, "parse failed\n");
- goto err;
+ return rc;
}
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
- if (ret) {
+ rc = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (rc) {
hid_err(hdev, "hw start failed\n");
- goto err;
- }
-
- switch (id->product) {
- case USB_DEVICE_ID_HUION_580:
- ret = huion_tablet_enable(hdev);
- if (ret) {
- hid_err(hdev, "tablet enabling failed\n");
- goto enabling_err;
- }
- break;
+ return rc;
}
return 0;
-enabling_err:
- hid_hw_stop(hdev);
-err:
- return ret;
}
static int huion_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *data, int size)
{
- /* If this is a pen input report then invert the in-range bit */
- if (report->type == HID_INPUT_REPORT && report->id == 0x07 && size >= 2)
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ /* If this is a pen input report */
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 0 &&
+ report->type == HID_INPUT_REPORT &&
+ report->id == 0x07 && size >= 2)
+ /* Invert the in-range bit */
data[1] ^= 0x40;
return 0;
}
static const struct hid_device_id huion_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
{ }
};
MODULE_DEVICE_TABLE(hid, huion_devices);
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index f52dbcb7133b..31fad641b744 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -308,6 +308,9 @@ static void mousevsc_on_receive(struct hv_device *device,
memcpy(input_dev->input_buf, input_report->buffer, len);
hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
input_dev->input_buf, len, 1);
+
+ pm_wakeup_event(&input_dev->device->device, 0);
+
break;
default:
pr_err("unsupported hid msg type - type %d len %d",
@@ -549,6 +552,8 @@ static int mousevsc_probe(struct hv_device *device,
goto probe_err2;
}
+ device_init_wakeup(&device->device, true);
+
input_dev->connected = true;
input_dev->init_complete = true;
@@ -571,6 +576,7 @@ static int mousevsc_remove(struct hv_device *dev)
{
struct mousevsc_dev *input_dev = hv_get_drvdata(dev);
+ device_init_wakeup(&dev->device, false);
vmbus_close(dev->channel);
hid_hw_stop(input_dev->hid_device);
hid_destroy_device(input_dev->hid_device);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 48b66bbffc94..25cd674d6064 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -448,7 +448,7 @@
#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030
#define USB_VENDOR_ID_HUION 0x256c
-#define USB_DEVICE_ID_HUION_580 0x006e
+#define USB_DEVICE_ID_HUION_TABLET 0x006e
#define USB_VENDOR_ID_IDEACOM 0x1cb6
#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
@@ -479,6 +479,7 @@
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A070 0xa070
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072 0xa072
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
+#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -489,6 +490,7 @@
#define USB_VENDOR_ID_JABRA 0x0b0e
#define USB_DEVICE_ID_JABRA_SPEAK_410 0x0412
#define USB_DEVICE_ID_JABRA_SPEAK_510 0x0420
+#define USB_DEVICE_ID_JABRA_GN9350E 0x9350
#define USB_VENDOR_ID_JESS 0x0c45
#define USB_DEVICE_ID_JESS_YUREX 0x1010
@@ -561,6 +563,8 @@
#define USB_VENDOR_ID_LENOVO 0x17ef
#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
+#define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047
+#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
@@ -646,7 +650,7 @@
#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
#define USB_VENDOR_ID_MSI 0x1770
-#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00
+#define USB_DEVICE_ID_MSI_GT683R_LED_PANEL 0xff00
#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
#define USB_DEVICE_ID_N_S_HARMONY 0xc359
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
deleted file mode 100644
index 2d25b6cbbc05..000000000000
--- a/drivers/hid/hid-lenovo-tpkbd.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/*
- * HID driver for Lenovo ThinkPad USB Keyboard with TrackPoint
- *
- * Copyright (c) 2012 Bernhard Seibold
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/module.h>
-#include <linux/sysfs.h>
-#include <linux/device.h>
-#include <linux/hid.h>
-#include <linux/input.h>
-#include <linux/leds.h>
-
-#include "hid-ids.h"
-
-/* This is only used for the trackpoint part of the driver, hence _tp */
-struct tpkbd_data_pointer {
- int led_state;
- struct led_classdev led_mute;
- struct led_classdev led_micmute;
- int press_to_select;
- int dragging;
- int release_to_select;
- int select_right;
- int sensitivity;
- int press_speed;
-};
-
-#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
-
-static int tpkbd_input_mapping(struct hid_device *hdev,
- struct hid_input *hi, struct hid_field *field,
- struct hid_usage *usage, unsigned long **bit, int *max)
-{
- if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
- /* mark the device as pointer */
- hid_set_drvdata(hdev, (void *)1);
- map_key_clear(KEY_MICMUTE);
- return 1;
- }
- return 0;
-}
-
-#undef map_key_clear
-
-static int tpkbd_features_set(struct hid_device *hdev)
-{
- struct hid_report *report;
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
-
- report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4];
-
- report->field[0]->value[0] = data_pointer->press_to_select ? 0x01 : 0x02;
- report->field[0]->value[0] |= data_pointer->dragging ? 0x04 : 0x08;
- report->field[0]->value[0] |= data_pointer->release_to_select ? 0x10 : 0x20;
- report->field[0]->value[0] |= data_pointer->select_right ? 0x80 : 0x40;
- report->field[1]->value[0] = 0x03; // unknown setting, imitate windows driver
- report->field[2]->value[0] = data_pointer->sensitivity;
- report->field[3]->value[0] = data_pointer->press_speed;
-
- hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
- return 0;
-}
-
-static ssize_t pointer_press_to_select_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select);
-}
-
-static ssize_t pointer_press_to_select_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
- int value;
-
- if (kstrtoint(buf, 10, &value))
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
-
- data_pointer->press_to_select = value;
- tpkbd_features_set(hdev);
-
- return count;
-}
-
-static ssize_t pointer_dragging_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging);
-}
-
-static ssize_t pointer_dragging_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
- int value;
-
- if (kstrtoint(buf, 10, &value))
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
-
- data_pointer->dragging = value;
- tpkbd_features_set(hdev);
-
- return count;
-}
-
-static ssize_t pointer_release_to_select_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select);
-}
-
-static ssize_t pointer_release_to_select_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
- int value;
-
- if (kstrtoint(buf, 10, &value))
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
-
- data_pointer->release_to_select = value;
- tpkbd_features_set(hdev);
-
- return count;
-}
-
-static ssize_t pointer_select_right_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right);
-}
-
-static ssize_t pointer_select_right_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
- int value;
-
- if (kstrtoint(buf, 10, &value))
- return -EINVAL;
- if (value < 0 || value > 1)
- return -EINVAL;
-
- data_pointer->select_right = value;
- tpkbd_features_set(hdev);
-
- return count;
-}
-
-static ssize_t pointer_sensitivity_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n",
- data_pointer->sensitivity);
-}
-
-static ssize_t pointer_sensitivity_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
- int value;
-
- if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
- return -EINVAL;
-
- data_pointer->sensitivity = value;
- tpkbd_features_set(hdev);
-
- return count;
-}
-
-static ssize_t pointer_press_speed_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
-
- return snprintf(buf, PAGE_SIZE, "%u\n",
- data_pointer->press_speed);
-}
-
-static ssize_t pointer_press_speed_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
- int value;
-
- if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
- return -EINVAL;
-
- data_pointer->press_speed = value;
- tpkbd_features_set(hdev);
-
- return count;
-}
-
-static struct device_attribute dev_attr_pointer_press_to_select =
- __ATTR(press_to_select, S_IWUSR | S_IRUGO,
- pointer_press_to_select_show,
- pointer_press_to_select_store);
-
-static struct device_attribute dev_attr_pointer_dragging =
- __ATTR(dragging, S_IWUSR | S_IRUGO,
- pointer_dragging_show,
- pointer_dragging_store);
-
-static struct device_attribute dev_attr_pointer_release_to_select =
- __ATTR(release_to_select, S_IWUSR | S_IRUGO,
- pointer_release_to_select_show,
- pointer_release_to_select_store);
-
-static struct device_attribute dev_attr_pointer_select_right =
- __ATTR(select_right, S_IWUSR | S_IRUGO,
- pointer_select_right_show,
- pointer_select_right_store);
-
-static struct device_attribute dev_attr_pointer_sensitivity =
- __ATTR(sensitivity, S_IWUSR | S_IRUGO,
- pointer_sensitivity_show,
- pointer_sensitivity_store);
-
-static struct device_attribute dev_attr_pointer_press_speed =
- __ATTR(press_speed, S_IWUSR | S_IRUGO,
- pointer_press_speed_show,
- pointer_press_speed_store);
-
-static struct attribute *tpkbd_attributes_pointer[] = {
- &dev_attr_pointer_press_to_select.attr,
- &dev_attr_pointer_dragging.attr,
- &dev_attr_pointer_release_to_select.attr,
- &dev_attr_pointer_select_right.attr,
- &dev_attr_pointer_sensitivity.attr,
- &dev_attr_pointer_press_speed.attr,
- NULL
-};
-
-static const struct attribute_group tpkbd_attr_group_pointer = {
- .attrs = tpkbd_attributes_pointer,
-};
-
-static enum led_brightness tpkbd_led_brightness_get(
- struct led_classdev *led_cdev)
-{
- struct device *dev = led_cdev->dev->parent;
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
- int led_nr = 0;
-
- if (led_cdev == &data_pointer->led_micmute)
- led_nr = 1;
-
- return data_pointer->led_state & (1 << led_nr)
- ? LED_FULL
- : LED_OFF;
-}
-
-static void tpkbd_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct device *dev = led_cdev->dev->parent;
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
- struct hid_report *report;
- int led_nr = 0;
-
- if (led_cdev == &data_pointer->led_micmute)
- led_nr = 1;
-
- if (value == LED_OFF)
- data_pointer->led_state &= ~(1 << led_nr);
- else
- data_pointer->led_state |= 1 << led_nr;
-
- report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3];
- report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1;
- report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1;
- hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
-}
-
-static int tpkbd_probe_tp(struct hid_device *hdev)
-{
- struct device *dev = &hdev->dev;
- struct tpkbd_data_pointer *data_pointer;
- size_t name_sz = strlen(dev_name(dev)) + 16;
- char *name_mute, *name_micmute;
- int i;
-
- /* Validate required reports. */
- for (i = 0; i < 4; i++) {
- if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1))
- return -ENODEV;
- }
- if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2))
- return -ENODEV;
-
- if (sysfs_create_group(&hdev->dev.kobj,
- &tpkbd_attr_group_pointer)) {
- hid_warn(hdev, "Could not create sysfs group\n");
- }
-
- data_pointer = devm_kzalloc(&hdev->dev,
- sizeof(struct tpkbd_data_pointer),
- GFP_KERNEL);
- if (data_pointer == NULL) {
- hid_err(hdev, "Could not allocate memory for driver data\n");
- return -ENOMEM;
- }
-
- // set same default values as windows driver
- data_pointer->sensitivity = 0xa0;
- data_pointer->press_speed = 0x38;
-
- name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
- name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
- if (name_mute == NULL || name_micmute == NULL) {
- hid_err(hdev, "Could not allocate memory for led data\n");
- return -ENOMEM;
- }
- snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
- snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
-
- hid_set_drvdata(hdev, data_pointer);
-
- data_pointer->led_mute.name = name_mute;
- data_pointer->led_mute.brightness_get = tpkbd_led_brightness_get;
- data_pointer->led_mute.brightness_set = tpkbd_led_brightness_set;
- data_pointer->led_mute.dev = dev;
- led_classdev_register(dev, &data_pointer->led_mute);
-
- data_pointer->led_micmute.name = name_micmute;
- data_pointer->led_micmute.brightness_get = tpkbd_led_brightness_get;
- data_pointer->led_micmute.brightness_set = tpkbd_led_brightness_set;
- data_pointer->led_micmute.dev = dev;
- led_classdev_register(dev, &data_pointer->led_micmute);
-
- tpkbd_features_set(hdev);
-
- return 0;
-}
-
-static int tpkbd_probe(struct hid_device *hdev,
- const struct hid_device_id *id)
-{
- int ret;
-
- ret = hid_parse(hdev);
- if (ret) {
- hid_err(hdev, "hid_parse failed\n");
- goto err;
- }
-
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
- if (ret) {
- hid_err(hdev, "hid_hw_start failed\n");
- goto err;
- }
-
- if (hid_get_drvdata(hdev)) {
- hid_set_drvdata(hdev, NULL);
- ret = tpkbd_probe_tp(hdev);
- if (ret)
- goto err_hid;
- }
-
- return 0;
-err_hid:
- hid_hw_stop(hdev);
-err:
- return ret;
-}
-
-static void tpkbd_remove_tp(struct hid_device *hdev)
-{
- struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev);
-
- sysfs_remove_group(&hdev->dev.kobj,
- &tpkbd_attr_group_pointer);
-
- led_classdev_unregister(&data_pointer->led_micmute);
- led_classdev_unregister(&data_pointer->led_mute);
-
- hid_set_drvdata(hdev, NULL);
-}
-
-static void tpkbd_remove(struct hid_device *hdev)
-{
- if (hid_get_drvdata(hdev))
- tpkbd_remove_tp(hdev);
-
- hid_hw_stop(hdev);
-}
-
-static const struct hid_device_id tpkbd_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
- { }
-};
-
-MODULE_DEVICE_TABLE(hid, tpkbd_devices);
-
-static struct hid_driver tpkbd_driver = {
- .name = "lenovo_tpkbd",
- .id_table = tpkbd_devices,
- .input_mapping = tpkbd_input_mapping,
- .probe = tpkbd_probe,
- .remove = tpkbd_remove,
-};
-module_hid_driver(tpkbd_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
new file mode 100644
index 000000000000..bf227f7679af
--- /dev/null
+++ b/drivers/hid/hid-lenovo.c
@@ -0,0 +1,708 @@
+/*
+ * HID driver for Lenovo:
+ * - ThinkPad USB Keyboard with TrackPoint (tpkbd)
+ * - ThinkPad Compact Bluetooth Keyboard with TrackPoint (cptkbd)
+ * - ThinkPad Compact USB Keyboard with TrackPoint (cptkbd)
+ *
+ * Copyright (c) 2012 Bernhard Seibold
+ * Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+
+#include "hid-ids.h"
+
+struct lenovo_drvdata_tpkbd {
+ int led_state;
+ struct led_classdev led_mute;
+ struct led_classdev led_micmute;
+ int press_to_select;
+ int dragging;
+ int release_to_select;
+ int select_right;
+ int sensitivity;
+ int press_speed;
+};
+
+struct lenovo_drvdata_cptkbd {
+ bool fn_lock;
+};
+
+#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ /* This sub-device contains trackpoint, mark it */
+ hid_set_drvdata(hdev, (void *)1);
+ map_key_clear(KEY_MICMUTE);
+ return 1;
+ }
+ return 0;
+}
+
+static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ /* HID_UP_LNVENDOR = USB, HID_UP_MSVENDOR = BT */
+ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR ||
+ (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
+ set_bit(EV_REP, hi->input->evbit);
+ switch (usage->hid & HID_USAGE) {
+ case 0x00f1: /* Fn-F4: Mic mute */
+ map_key_clear(KEY_MICMUTE);
+ return 1;
+ case 0x00f2: /* Fn-F5: Brightness down */
+ map_key_clear(KEY_BRIGHTNESSDOWN);
+ return 1;
+ case 0x00f3: /* Fn-F6: Brightness up */
+ map_key_clear(KEY_BRIGHTNESSUP);
+ return 1;
+ case 0x00f4: /* Fn-F7: External display (projector) */
+ map_key_clear(KEY_SWITCHVIDEOMODE);
+ return 1;
+ case 0x00f5: /* Fn-F8: Wireless */
+ map_key_clear(KEY_WLAN);
+ return 1;
+ case 0x00f6: /* Fn-F9: Control panel */
+ map_key_clear(KEY_CONFIG);
+ return 1;
+ case 0x00f8: /* Fn-F11: View open applications (3 boxes) */
+ map_key_clear(KEY_SCALE);
+ return 1;
+ case 0x00fa: /* Fn-Esc: Fn-lock toggle */
+ map_key_clear(KEY_FN_ESC);
+ return 1;
+ case 0x00fb: /* Fn-F12: Open My computer (6 boxes) USB-only */
+ /* NB: This mapping is invented in raw_event below */
+ map_key_clear(KEY_FILE);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static int lenovo_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LENOVO_TPKBD:
+ return lenovo_input_mapping_tpkbd(hdev, hi, field,
+ usage, bit, max);
+ case USB_DEVICE_ID_LENOVO_CUSBKBD:
+ case USB_DEVICE_ID_LENOVO_CBTKBD:
+ return lenovo_input_mapping_cptkbd(hdev, hi, field,
+ usage, bit, max);
+ default:
+ return 0;
+ }
+}
+
+#undef map_key_clear
+
+/* Send a config command to the keyboard */
+static int lenovo_send_cmd_cptkbd(struct hid_device *hdev,
+ unsigned char byte2, unsigned char byte3)
+{
+ int ret;
+ unsigned char buf[] = {0x18, byte2, byte3};
+
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LENOVO_CUSBKBD:
+ ret = hid_hw_raw_request(hdev, 0x13, buf, sizeof(buf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ break;
+ case USB_DEVICE_ID_LENOVO_CBTKBD:
+ ret = hid_hw_output_report(hdev, buf, sizeof(buf));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */
+}
+
+static void lenovo_features_set_cptkbd(struct hid_device *hdev)
+{
+ int ret;
+ struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+
+ ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock);
+ if (ret)
+ hid_err(hdev, "Fn-lock setting failed: %d\n", ret);
+}
+
+static ssize_t attr_fn_lock_show_cptkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", cptkbd_data->fn_lock);
+}
+
+static ssize_t attr_fn_lock_store_cptkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ cptkbd_data->fn_lock = !!value;
+ lenovo_features_set_cptkbd(hdev);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_fn_lock_cptkbd =
+ __ATTR(fn_lock, S_IWUSR | S_IRUGO,
+ attr_fn_lock_show_cptkbd,
+ attr_fn_lock_store_cptkbd);
+
+static struct attribute *lenovo_attributes_cptkbd[] = {
+ &dev_attr_fn_lock_cptkbd.attr,
+ NULL
+};
+
+static const struct attribute_group lenovo_attr_group_cptkbd = {
+ .attrs = lenovo_attributes_cptkbd,
+};
+
+static int lenovo_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ /*
+ * Compact USB keyboard's Fn-F12 report holds down many other keys, and
+ * its own key is outside the usage page range. Remove extra
+ * keypresses and remap to inside usage page.
+ */
+ if (unlikely(hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD
+ && size == 3
+ && data[0] == 0x15
+ && data[1] == 0x94
+ && data[2] == 0x01)) {
+ data[1] = 0x0;
+ data[2] = 0x4;
+ }
+
+ return 0;
+}
+
+static int lenovo_features_set_tpkbd(struct hid_device *hdev)
+{
+ struct hid_report *report;
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4];
+
+ report->field[0]->value[0] = data_pointer->press_to_select ? 0x01 : 0x02;
+ report->field[0]->value[0] |= data_pointer->dragging ? 0x04 : 0x08;
+ report->field[0]->value[0] |= data_pointer->release_to_select ? 0x10 : 0x20;
+ report->field[0]->value[0] |= data_pointer->select_right ? 0x80 : 0x40;
+ report->field[1]->value[0] = 0x03; // unknown setting, imitate windows driver
+ report->field[2]->value[0] = data_pointer->sensitivity;
+ report->field[3]->value[0] = data_pointer->press_speed;
+
+ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+ return 0;
+}
+
+static ssize_t attr_press_to_select_show_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select);
+}
+
+static ssize_t attr_press_to_select_store_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->press_to_select = value;
+ lenovo_features_set_tpkbd(hdev);
+
+ return count;
+}
+
+static ssize_t attr_dragging_show_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging);
+}
+
+static ssize_t attr_dragging_store_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->dragging = value;
+ lenovo_features_set_tpkbd(hdev);
+
+ return count;
+}
+
+static ssize_t attr_release_to_select_show_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select);
+}
+
+static ssize_t attr_release_to_select_store_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->release_to_select = value;
+ lenovo_features_set_tpkbd(hdev);
+
+ return count;
+}
+
+static ssize_t attr_select_right_show_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right);
+}
+
+static ssize_t attr_select_right_store_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->select_right = value;
+ lenovo_features_set_tpkbd(hdev);
+
+ return count;
+}
+
+static ssize_t attr_sensitivity_show_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ data_pointer->sensitivity);
+}
+
+static ssize_t attr_sensitivity_store_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
+ return -EINVAL;
+
+ data_pointer->sensitivity = value;
+ lenovo_features_set_tpkbd(hdev);
+
+ return count;
+}
+
+static ssize_t attr_press_speed_show_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ data_pointer->press_speed);
+}
+
+static ssize_t attr_press_speed_store_tpkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
+ return -EINVAL;
+
+ data_pointer->press_speed = value;
+ lenovo_features_set_tpkbd(hdev);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_press_to_select_tpkbd =
+ __ATTR(press_to_select, S_IWUSR | S_IRUGO,
+ attr_press_to_select_show_tpkbd,
+ attr_press_to_select_store_tpkbd);
+
+static struct device_attribute dev_attr_dragging_tpkbd =
+ __ATTR(dragging, S_IWUSR | S_IRUGO,
+ attr_dragging_show_tpkbd,
+ attr_dragging_store_tpkbd);
+
+static struct device_attribute dev_attr_release_to_select_tpkbd =
+ __ATTR(release_to_select, S_IWUSR | S_IRUGO,
+ attr_release_to_select_show_tpkbd,
+ attr_release_to_select_store_tpkbd);
+
+static struct device_attribute dev_attr_select_right_tpkbd =
+ __ATTR(select_right, S_IWUSR | S_IRUGO,
+ attr_select_right_show_tpkbd,
+ attr_select_right_store_tpkbd);
+
+static struct device_attribute dev_attr_sensitivity_tpkbd =
+ __ATTR(sensitivity, S_IWUSR | S_IRUGO,
+ attr_sensitivity_show_tpkbd,
+ attr_sensitivity_store_tpkbd);
+
+static struct device_attribute dev_attr_press_speed_tpkbd =
+ __ATTR(press_speed, S_IWUSR | S_IRUGO,
+ attr_press_speed_show_tpkbd,
+ attr_press_speed_store_tpkbd);
+
+static struct attribute *lenovo_attributes_tpkbd[] = {
+ &dev_attr_press_to_select_tpkbd.attr,
+ &dev_attr_dragging_tpkbd.attr,
+ &dev_attr_release_to_select_tpkbd.attr,
+ &dev_attr_select_right_tpkbd.attr,
+ &dev_attr_sensitivity_tpkbd.attr,
+ &dev_attr_press_speed_tpkbd.attr,
+ NULL
+};
+
+static const struct attribute_group lenovo_attr_group_tpkbd = {
+ .attrs = lenovo_attributes_tpkbd,
+};
+
+static enum led_brightness lenovo_led_brightness_get_tpkbd(
+ struct led_classdev *led_cdev)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+ int led_nr = 0;
+
+ if (led_cdev == &data_pointer->led_micmute)
+ led_nr = 1;
+
+ return data_pointer->led_state & (1 << led_nr)
+ ? LED_FULL
+ : LED_OFF;
+}
+
+static void lenovo_led_brightness_set_tpkbd(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev = led_cdev->dev->parent;
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+ struct hid_report *report;
+ int led_nr = 0;
+
+ if (led_cdev == &data_pointer->led_micmute)
+ led_nr = 1;
+
+ if (value == LED_OFF)
+ data_pointer->led_state &= ~(1 << led_nr);
+ else
+ data_pointer->led_state |= 1 << led_nr;
+
+ report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3];
+ report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1;
+ report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1;
+ hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
+}
+
+static int lenovo_probe_tpkbd(struct hid_device *hdev)
+{
+ struct device *dev = &hdev->dev;
+ struct lenovo_drvdata_tpkbd *data_pointer;
+ size_t name_sz = strlen(dev_name(dev)) + 16;
+ char *name_mute, *name_micmute;
+ int i;
+ int ret;
+
+ /*
+ * Only register extra settings against subdevice where input_mapping
+ * set drvdata to 1, i.e. the trackpoint.
+ */
+ if (!hid_get_drvdata(hdev))
+ return 0;
+
+ hid_set_drvdata(hdev, NULL);
+
+ /* Validate required reports. */
+ for (i = 0; i < 4; i++) {
+ if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1))
+ return -ENODEV;
+ }
+ if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2))
+ return -ENODEV;
+
+ ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_tpkbd);
+ if (ret)
+ hid_warn(hdev, "Could not create sysfs group: %d\n", ret);
+
+ data_pointer = devm_kzalloc(&hdev->dev,
+ sizeof(struct lenovo_drvdata_tpkbd),
+ GFP_KERNEL);
+ if (data_pointer == NULL) {
+ hid_err(hdev, "Could not allocate memory for driver data\n");
+ return -ENOMEM;
+ }
+
+	/* Set the same default values as the Windows driver */
+ data_pointer->sensitivity = 0xa0;
+ data_pointer->press_speed = 0x38;
+
+ name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+ name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+ if (name_mute == NULL || name_micmute == NULL) {
+ hid_err(hdev, "Could not allocate memory for led data\n");
+ return -ENOMEM;
+ }
+ snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
+ snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
+
+ hid_set_drvdata(hdev, data_pointer);
+
+ data_pointer->led_mute.name = name_mute;
+ data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd;
+ data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd;
+ data_pointer->led_mute.dev = dev;
+ led_classdev_register(dev, &data_pointer->led_mute);
+
+ data_pointer->led_micmute.name = name_micmute;
+ data_pointer->led_micmute.brightness_get =
+ lenovo_led_brightness_get_tpkbd;
+ data_pointer->led_micmute.brightness_set =
+ lenovo_led_brightness_set_tpkbd;
+ data_pointer->led_micmute.dev = dev;
+ led_classdev_register(dev, &data_pointer->led_micmute);
+
+ lenovo_features_set_tpkbd(hdev);
+
+ return 0;
+}
+
+static int lenovo_probe_cptkbd(struct hid_device *hdev)
+{
+ int ret;
+ struct lenovo_drvdata_cptkbd *cptkbd_data;
+
+ /* All the custom action happens on the USBMOUSE device for USB */
+ if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD
+ && hdev->type != HID_TYPE_USBMOUSE) {
+ hid_dbg(hdev, "Ignoring keyboard half of device\n");
+ return 0;
+ }
+
+ cptkbd_data = devm_kzalloc(&hdev->dev,
+ sizeof(*cptkbd_data),
+ GFP_KERNEL);
+ if (cptkbd_data == NULL) {
+ hid_err(hdev, "can't alloc keyboard descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, cptkbd_data);
+
+ /*
+ * Tell the keyboard a driver understands it, and turn F7, F9, F11 into
+ * regular keys
+ */
+ ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03);
+ if (ret)
+ hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret);
+
+ /* Turn Fn-Lock on by default */
+ cptkbd_data->fn_lock = true;
+ lenovo_features_set_cptkbd(hdev);
+
+ ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd);
+ if (ret)
+ hid_warn(hdev, "Could not create sysfs group: %d\n", ret);
+
+ return 0;
+}
+
+static int lenovo_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid_parse failed\n");
+ goto err;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hid_hw_start failed\n");
+ goto err;
+ }
+
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LENOVO_TPKBD:
+ ret = lenovo_probe_tpkbd(hdev);
+ break;
+ case USB_DEVICE_ID_LENOVO_CUSBKBD:
+ case USB_DEVICE_ID_LENOVO_CBTKBD:
+ ret = lenovo_probe_cptkbd(hdev);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+ if (ret)
+ goto err_hid;
+
+ return 0;
+err_hid:
+ hid_hw_stop(hdev);
+err:
+ return ret;
+}
+
+static void lenovo_remove_tpkbd(struct hid_device *hdev)
+{
+ struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev);
+
+ /*
+ * Only the trackpoint half of the keyboard has drvdata and stuff that
+ * needs unregistering.
+ */
+ if (data_pointer == NULL)
+ return;
+
+ sysfs_remove_group(&hdev->dev.kobj,
+ &lenovo_attr_group_tpkbd);
+
+ led_classdev_unregister(&data_pointer->led_micmute);
+ led_classdev_unregister(&data_pointer->led_mute);
+
+ hid_set_drvdata(hdev, NULL);
+}
+
+static void lenovo_remove_cptkbd(struct hid_device *hdev)
+{
+ sysfs_remove_group(&hdev->dev.kobj,
+ &lenovo_attr_group_cptkbd);
+}
+
+static void lenovo_remove(struct hid_device *hdev)
+{
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LENOVO_TPKBD:
+ lenovo_remove_tpkbd(hdev);
+ break;
+ case USB_DEVICE_ID_LENOVO_CUSBKBD:
+ case USB_DEVICE_ID_LENOVO_CBTKBD:
+ lenovo_remove_cptkbd(hdev);
+ break;
+ }
+
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id lenovo_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, lenovo_devices);
+
+static struct hid_driver lenovo_driver = {
+ .name = "lenovo",
+ .id_table = lenovo_devices,
+ .input_mapping = lenovo_input_mapping,
+ .probe = lenovo_probe,
+ .remove = lenovo_remove,
+ .raw_event = lenovo_raw_event,
+};
+module_hid_driver(lenovo_driver);
+
+MODULE_LICENSE("GPL");
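
The six show/store pairs above all follow the same sysfs pattern: recover the hid_device from the struct device, fetch the driver data, parse and range-check the value, then push it to the hardware. A minimal sketch of one such boolean toggle, using hypothetical example_* names and kstrtobool() (available in later kernels) in place of the kstrtoint() plus 0..1 check:

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/kernel.h>

struct example_drvdata {
	bool press_to_select;
};

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct example_drvdata *data = hid_get_drvdata(hdev);

	return snprintf(buf, PAGE_SIZE, "%d\n", data->press_to_select ? 1 : 0);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
	struct example_drvdata *data = hid_get_drvdata(hdev);
	bool value;

	/* kstrtobool() accepts 0/1, y/n, on/off and rejects anything else */
	if (kstrtobool(buf, &value))
		return -EINVAL;

	data->press_to_select = value;
	/* a real driver would send the updated feature report here */
	return count;
}

static DEVICE_ATTR(press_to_select, S_IWUSR | S_IRUGO,
		   example_show, example_store);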
diff --git a/drivers/hid/hid-picolcd_debugfs.c b/drivers/hid/hid-picolcd_debugfs.c
index 024cdf3c2297..3c13af684410 100644
--- a/drivers/hid/hid-picolcd_debugfs.c
+++ b/drivers/hid/hid-picolcd_debugfs.c
@@ -883,16 +883,13 @@ void picolcd_exit_devfs(struct picolcd_data *data)
dent = data->debug_reset;
data->debug_reset = NULL;
- if (dent)
- debugfs_remove(dent);
+ debugfs_remove(dent);
dent = data->debug_eeprom;
data->debug_eeprom = NULL;
- if (dent)
- debugfs_remove(dent);
+ debugfs_remove(dent);
dent = data->debug_flash;
data->debug_flash = NULL;
- if (dent)
- debugfs_remove(dent);
+ debugfs_remove(dent);
mutex_destroy(&data->mutex_flash);
}
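
The checks removed here were redundant because debugfs_remove() already tolerates NULL (and error) dentries, so teardown code can pass whatever it stored without guarding:

#include <linux/debugfs.h>

static void example_debugfs_cleanup(struct dentry *dent)
{
	/* no-op when dent is NULL or an error pointer */
	debugfs_remove(dent);
}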
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 578bbe65902b..0dc25142f451 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -377,7 +377,7 @@ static int rmi_input_event(struct hid_device *hdev, u8 *data, int size)
irq_mask |= hdata->f30.irq_mask;
if (data[1] & ~irq_mask)
- hid_warn(hdev, "unknown intr source:%02lx %s:%d\n",
+ hid_dbg(hdev, "unknown intr source:%02lx %s:%d\n",
data[1] & ~irq_mask, __FILE__, __LINE__);
if (hdata->f11.interrupt_base < hdata->f30.interrupt_base) {
@@ -400,7 +400,7 @@ static int rmi_read_data_event(struct hid_device *hdev, u8 *data, int size)
struct rmi_data *hdata = hid_get_drvdata(hdev);
if (!test_bit(RMI_READ_REQUEST_PENDING, &hdata->flags)) {
- hid_err(hdev, "no read request pending\n");
+ hid_dbg(hdev, "no read request pending\n");
return 0;
}
@@ -549,10 +549,12 @@ static int rmi_populate_f11(struct hid_device *hdev)
u8 buf[20];
int ret;
bool has_query9;
- bool has_query10;
+ bool has_query10 = false;
bool has_query11;
bool has_query12;
bool has_physical_props;
+ bool has_gestures;
+ bool has_rel;
unsigned x_size, y_size;
u16 query12_offset;
@@ -589,19 +591,32 @@ static int rmi_populate_f11(struct hid_device *hdev)
return -ENODEV;
}
- /* query 8 to find out if query 10 exists */
- ret = rmi_read(hdev, data->f11.query_base_addr + 8, buf);
- if (ret) {
- hid_err(hdev, "can not read gesture information: %d.\n", ret);
- return ret;
+ has_rel = !!(buf[0] & BIT(3));
+ has_gestures = !!(buf[0] & BIT(5));
+
+ if (has_gestures) {
+ /* query 8 to find out if query 10 exists */
+ ret = rmi_read(hdev, data->f11.query_base_addr + 8, buf);
+ if (ret) {
+ hid_err(hdev, "can not read gesture information: %d.\n",
+ ret);
+ return ret;
+ }
+ has_query10 = !!(buf[0] & BIT(2));
}
- has_query10 = !!(buf[0] & BIT(2));
/*
- * At least 8 queries are guaranteed to be present in F11
- * +1 for query12.
+ * At least 4 queries are guaranteed to be present in F11
+ * +1 for query 5 which is present since absolute events are
+ * reported and +1 for query 12.
*/
- query12_offset = 9;
+ query12_offset = 6;
+
+ if (has_rel)
+ ++query12_offset; /* query 6 is present */
+
+ if (has_gestures)
+ query12_offset += 2; /* query 7 and 8 are present */
if (has_query9)
++query12_offset;
@@ -833,6 +848,8 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct rmi_data *data = NULL;
int ret;
size_t alloc_size;
+ struct hid_report *input_report;
+ struct hid_report *output_report;
data = devm_kzalloc(&hdev->dev, sizeof(struct rmi_data), GFP_KERNEL);
if (!data)
@@ -851,12 +868,26 @@ static int rmi_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
- data->input_report_size = (hdev->report_enum[HID_INPUT_REPORT]
- .report_id_hash[RMI_ATTN_REPORT_ID]->size >> 3)
- + 1 /* report id */;
- data->output_report_size = (hdev->report_enum[HID_OUTPUT_REPORT]
- .report_id_hash[RMI_WRITE_REPORT_ID]->size >> 3)
- + 1 /* report id */;
+ input_report = hdev->report_enum[HID_INPUT_REPORT]
+ .report_id_hash[RMI_ATTN_REPORT_ID];
+ if (!input_report) {
+ hid_err(hdev, "device does not have expected input report\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ data->input_report_size = (input_report->size >> 3) + 1 /* report id */;
+
+ output_report = hdev->report_enum[HID_OUTPUT_REPORT]
+ .report_id_hash[RMI_WRITE_REPORT_ID];
+ if (!output_report) {
+ hid_err(hdev, "device does not have expected output report\n");
+ ret = -ENODEV;
+ return ret;
+ }
+
+ data->output_report_size = (output_report->size >> 3)
+ + 1 /* report id */;
alloc_size = data->output_report_size + data->input_report_size;
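
The probe change above replaces blind report_id_hash dereferences with explicit NULL checks before the sizes are computed. The same lookup, factored into a hypothetical helper for illustration (hid_report.size is in bits, hence the shift):

#include <linux/hid.h>

static int example_report_size(struct hid_device *hdev,
			       unsigned int report_type, unsigned int id,
			       size_t *size)
{
	struct hid_report *report;

	/* report_type is HID_INPUT_REPORT etc., id a valid report ID */
	report = hdev->report_enum[report_type].report_id_hash[id];
	if (!report)
		return -ENODEV;

	*size = (report->size >> 3) + 1;	/* +1 for the report ID byte */
	return 0;
}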
diff --git a/drivers/hid/hid-roccat-lua.c b/drivers/hid/hid-roccat-lua.c
index 6adc0fa08d96..65e2e76bf2fe 100644
--- a/drivers/hid/hid-roccat-lua.c
+++ b/drivers/hid/hid-roccat-lua.c
@@ -61,7 +61,7 @@ static ssize_t lua_sysfs_write(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&lua->lua_lock);
- retval = roccat_common2_send(usb_dev, command, (void *)buf, real_size);
+ retval = roccat_common2_send(usb_dev, command, buf, real_size);
mutex_unlock(&lua->lua_lock);
return retval ? retval : real_size;
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 2259eaa8b988..c372368e438c 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -56,32 +56,81 @@
#define MAX_LEDS 4
-static const u8 sixaxis_rdesc_fixup[] = {
- 0x95, 0x13, 0x09, 0x01, 0x81, 0x02, 0x95, 0x0C,
- 0x81, 0x01, 0x75, 0x10, 0x95, 0x04, 0x26, 0xFF,
- 0x03, 0x46, 0xFF, 0x03, 0x09, 0x01, 0x81, 0x02
-};
-
-static const u8 sixaxis_rdesc_fixup2[] = {
- 0x05, 0x01, 0x09, 0x04, 0xa1, 0x01, 0xa1, 0x02,
- 0x85, 0x01, 0x75, 0x08, 0x95, 0x01, 0x15, 0x00,
- 0x26, 0xff, 0x00, 0x81, 0x03, 0x75, 0x01, 0x95,
- 0x13, 0x15, 0x00, 0x25, 0x01, 0x35, 0x00, 0x45,
- 0x01, 0x05, 0x09, 0x19, 0x01, 0x29, 0x13, 0x81,
- 0x02, 0x75, 0x01, 0x95, 0x0d, 0x06, 0x00, 0xff,
- 0x81, 0x03, 0x15, 0x00, 0x26, 0xff, 0x00, 0x05,
- 0x01, 0x09, 0x01, 0xa1, 0x00, 0x75, 0x08, 0x95,
- 0x04, 0x35, 0x00, 0x46, 0xff, 0x00, 0x09, 0x30,
- 0x09, 0x31, 0x09, 0x32, 0x09, 0x35, 0x81, 0x02,
- 0xc0, 0x05, 0x01, 0x95, 0x13, 0x09, 0x01, 0x81,
- 0x02, 0x95, 0x0c, 0x81, 0x01, 0x75, 0x10, 0x95,
- 0x04, 0x26, 0xff, 0x03, 0x46, 0xff, 0x03, 0x09,
- 0x01, 0x81, 0x02, 0xc0, 0xa1, 0x02, 0x85, 0x02,
- 0x75, 0x08, 0x95, 0x30, 0x09, 0x01, 0xb1, 0x02,
- 0xc0, 0xa1, 0x02, 0x85, 0xee, 0x75, 0x08, 0x95,
- 0x30, 0x09, 0x01, 0xb1, 0x02, 0xc0, 0xa1, 0x02,
- 0x85, 0xef, 0x75, 0x08, 0x95, 0x30, 0x09, 0x01,
- 0xb1, 0x02, 0xc0, 0xc0,
+static __u8 sixaxis_rdesc[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+	0x09, 0x04,         /*  Usage (Joystick),                   */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x13, /* Report Count (19), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x35, 0x00, /* Physical Minimum (0), */
+ 0x45, 0x01, /* Physical Maximum (1), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x13, /* Usage Maximum (13h), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x0D, /* Report Count (13), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0xA1, 0x00, /* Collection (Physical), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0x35, 0x00, /* Physical Minimum (0), */
+ 0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x09, 0x32, /* Usage (Z), */
+ 0x09, 0x35, /* Usage (Rz), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x95, 0x13, /* Report Count (19), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x0C, /* Report Count (12), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+ 0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x30, /* Report Count (48), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x85, 0xEE, /* Report ID (238), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x30, /* Report Count (48), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x85, 0xEF, /* Report ID (239), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x30, /* Report Count (48), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
};
/*
@@ -778,6 +827,13 @@ struct sony_sc {
__u8 led_count;
};
+static __u8 *sixaxis_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ *rsize = sizeof(sixaxis_rdesc);
+ return sixaxis_rdesc;
+}
+
static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -819,8 +875,6 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
return 1;
}
-
-/* Sony Vaio VGX has wrongly mouse pointer declared as constant */
static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -857,20 +911,8 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(dualshock4_bt_rdesc);
}
- /* The HID descriptor exposed over BT has a trailing zero byte */
- if ((((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize == 148) ||
- ((sc->quirks & SIXAXIS_CONTROLLER_BT) && *rsize == 149)) &&
- rdesc[83] == 0x75) {
- hid_info(hdev, "Fixing up Sony Sixaxis report descriptor\n");
- memcpy((void *)&rdesc[83], (void *)&sixaxis_rdesc_fixup,
- sizeof(sixaxis_rdesc_fixup));
- } else if (sc->quirks & SIXAXIS_CONTROLLER_USB &&
- *rsize > sizeof(sixaxis_rdesc_fixup2)) {
- hid_info(hdev, "Sony Sixaxis clone detected. Using original report descriptor (size: %d clone; %d new)\n",
- *rsize, (int)sizeof(sixaxis_rdesc_fixup2));
- *rsize = sizeof(sixaxis_rdesc_fixup2);
- memcpy(rdesc, &sixaxis_rdesc_fixup2, *rsize);
- }
+ if (sc->quirks & SIXAXIS_CONTROLLER)
+ return sixaxis_fixup(hdev, rdesc, rsize);
if (sc->quirks & PS3REMOTE)
return ps3remote_fixup(hdev, rdesc, rsize);
@@ -1307,7 +1349,7 @@ static int sony_leds_init(struct sony_sc *sc)
static const char * const ds4_name_str[] = { "red", "green", "blue",
"global" };
__u8 initial_values[MAX_LEDS] = { 0 };
- __u8 max_brightness[MAX_LEDS] = { 1 };
+ __u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
__u8 use_hw_blink[MAX_LEDS] = { 0 };
BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
@@ -1830,9 +1872,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (sc->quirks & VAIO_RDESC_CONSTANT)
connect_mask |= HID_CONNECT_HIDDEV_FORCE;
- else if (sc->quirks & SIXAXIS_CONTROLLER_USB)
- connect_mask |= HID_CONNECT_HIDDEV_FORCE;
- else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
+ else if (sc->quirks & SIXAXIS_CONTROLLER)
connect_mask |= HID_CONNECT_HIDDEV_FORCE;
ret = hid_hw_start(hdev, connect_mask);
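
The max_brightness change is easy to misread: an initializer of { 1 } sets only element 0 and zero-fills the rest, whereas the GCC range designator sets every element. A standalone illustration (plain userspace C, values chosen arbitrarily):

#include <stdio.h>

#define MAX_LEDS 4

int main(void)
{
	unsigned char only_first[MAX_LEDS] = { 1 };				/* 1, 0, 0, 0 */
	unsigned char all_ones[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };	/* 1, 1, 1, 1 */
	int i;

	for (i = 0; i < MAX_LEDS; i++)
		printf("%d %d\n", only_first[i], all_ones[i]);
	return 0;
}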
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
deleted file mode 100644
index 902013ec041b..000000000000
--- a/drivers/hid/hid-wacom.c
+++ /dev/null
@@ -1,973 +0,0 @@
-/*
- * Bluetooth Wacom Tablet support
- *
- * Copyright (c) 1999 Andreas Gal
- * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
- * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
- * Copyright (c) 2006-2007 Jiri Kosina
- * Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com>
- * Copyright (c) 2006 Andrew Zabolotny <zap@homelink.ru>
- * Copyright (c) 2009 Bastien Nocera <hadess@hadess.net>
- * Copyright (c) 2011 Przemysław Firszt <przemo@firszt.eu>
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/device.h>
-#include <linux/hid.h>
-#include <linux/module.h>
-#include <linux/leds.h>
-#include <linux/slab.h>
-#include <linux/power_supply.h>
-
-#include "hid-ids.h"
-
-#define PAD_DEVICE_ID 0x0F
-
-#define WAC_CMD_LED_CONTROL 0x20
-#define WAC_CMD_ICON_START_STOP 0x21
-#define WAC_CMD_ICON_TRANSFER 0x26
-
-struct wacom_data {
- __u16 tool;
- __u16 butstate;
- __u8 whlstate;
- __u8 features;
- __u32 id;
- __u32 serial;
- unsigned char high_speed;
- __u8 battery_capacity;
- __u8 power_raw;
- __u8 ps_connected;
- __u8 bat_charging;
- struct power_supply battery;
- struct power_supply ac;
- __u8 led_selector;
- struct led_classdev *leds[4];
-};
-
-/*percent of battery capacity for Graphire
- 8th value means AC online and show 100% capacity */
-static unsigned short batcap_gr[8] = { 1, 15, 25, 35, 50, 70, 100, 100 };
-/*percent of battery capacity for Intuos4 WL, AC has a separate bit*/
-static unsigned short batcap_i4[8] = { 1, 15, 30, 45, 60, 70, 85, 100 };
-
-static enum power_supply_property wacom_battery_props[] = {
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_SCOPE,
- POWER_SUPPLY_PROP_STATUS,
-};
-
-static enum power_supply_property wacom_ac_props[] = {
- POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_SCOPE,
-};
-
-static void wacom_scramble(__u8 *image)
-{
- __u16 mask;
- __u16 s1;
- __u16 s2;
- __u16 r1 ;
- __u16 r2 ;
- __u16 r;
- __u8 buf[256];
- int i, w, x, y, z;
-
- for (x = 0; x < 32; x++) {
- for (y = 0; y < 8; y++)
- buf[(8 * x) + (7 - y)] = image[(8 * x) + y];
- }
-
- /* Change 76543210 into GECA6420 as required by Intuos4 WL
- * HGFEDCBA HFDB7531
- */
- for (x = 0; x < 4; x++) {
- for (y = 0; y < 4; y++) {
- for (z = 0; z < 8; z++) {
- mask = 0x0001;
- r1 = 0;
- r2 = 0;
- i = (x << 6) + (y << 4) + z;
- s1 = buf[i];
- s2 = buf[i+8];
- for (w = 0; w < 8; w++) {
- r1 |= (s1 & mask);
- r2 |= (s2 & mask);
- s1 <<= 1;
- s2 <<= 1;
- mask <<= 2;
- }
- r = r1 | (r2 << 1);
- i = (x << 6) + (y << 4) + (z << 1);
- image[i] = 0xFF & r;
- image[i+1] = (0xFF00 & r) >> 8;
- }
- }
- }
-}
-
-static void wacom_set_image(struct hid_device *hdev, const char *image,
- __u8 icon_no)
-{
- __u8 rep_data[68];
- __u8 p[256];
- int ret, i, j;
-
- for (i = 0; i < 256; i++)
- p[i] = image[i];
-
- rep_data[0] = WAC_CMD_ICON_START_STOP;
- rep_data[1] = 0;
- ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
- if (ret < 0)
- goto err;
-
- rep_data[0] = WAC_CMD_ICON_TRANSFER;
- rep_data[1] = icon_no & 0x07;
-
- wacom_scramble(p);
-
- for (i = 0; i < 4; i++) {
- for (j = 0; j < 64; j++)
- rep_data[j + 3] = p[(i << 6) + j];
-
- rep_data[2] = i;
- ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 67,
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
- }
-
- rep_data[0] = WAC_CMD_ICON_START_STOP;
- rep_data[1] = 0;
-
- ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
-
-err:
- return;
-}
-
-static void wacom_leds_set_brightness(struct led_classdev *led_dev,
- enum led_brightness value)
-{
- struct device *dev = led_dev->dev->parent;
- struct hid_device *hdev;
- struct wacom_data *wdata;
- unsigned char *buf;
- __u8 led = 0;
- int i;
-
- hdev = container_of(dev, struct hid_device, dev);
- wdata = hid_get_drvdata(hdev);
- for (i = 0; i < 4; ++i) {
- if (wdata->leds[i] == led_dev)
- wdata->led_selector = i;
- }
-
- led = wdata->led_selector | 0x04;
- buf = kzalloc(9, GFP_KERNEL);
- if (buf) {
- buf[0] = WAC_CMD_LED_CONTROL;
- buf[1] = led;
- buf[2] = value >> 2;
- buf[3] = value;
- /* use fixed brightness for OLEDs */
- buf[4] = 0x08;
- hid_hw_raw_request(hdev, buf[0], buf, 9, HID_FEATURE_REPORT,
- HID_REQ_SET_REPORT);
- kfree(buf);
- }
-
- return;
-}
-
-static enum led_brightness wacom_leds_get_brightness(struct led_classdev *led_dev)
-{
- struct wacom_data *wdata;
- struct device *dev = led_dev->dev->parent;
- int value = 0;
- int i;
-
- wdata = hid_get_drvdata(container_of(dev, struct hid_device, dev));
-
- for (i = 0; i < 4; ++i) {
- if (wdata->leds[i] == led_dev) {
- value = wdata->leds[i]->brightness;
- break;
- }
- }
-
- return value;
-}
-
-
-static int wacom_initialize_leds(struct hid_device *hdev)
-{
- struct wacom_data *wdata = hid_get_drvdata(hdev);
- struct led_classdev *led;
- struct device *dev = &hdev->dev;
- size_t namesz = strlen(dev_name(dev)) + 12;
- char *name;
- int i, ret;
-
- wdata->led_selector = 0;
-
- for (i = 0; i < 4; i++) {
- led = kzalloc(sizeof(struct led_classdev) + namesz, GFP_KERNEL);
- if (!led) {
- hid_warn(hdev,
- "can't allocate memory for LED selector\n");
- ret = -ENOMEM;
- goto err;
- }
-
- name = (void *)&led[1];
- snprintf(name, namesz, "%s:selector:%d", dev_name(dev), i);
- led->name = name;
- led->brightness = 0;
- led->max_brightness = 127;
- led->brightness_get = wacom_leds_get_brightness;
- led->brightness_set = wacom_leds_set_brightness;
-
- wdata->leds[i] = led;
-
- ret = led_classdev_register(dev, wdata->leds[i]);
-
- if (ret) {
- wdata->leds[i] = NULL;
- kfree(led);
- hid_warn(hdev, "can't register LED\n");
- goto err;
- }
- }
-
-err:
- return ret;
-}
-
-static void wacom_destroy_leds(struct hid_device *hdev)
-{
- struct wacom_data *wdata = hid_get_drvdata(hdev);
- struct led_classdev *led;
- int i;
-
- for (i = 0; i < 4; ++i) {
- if (wdata->leds[i]) {
- led = wdata->leds[i];
- wdata->leds[i] = NULL;
- led_classdev_unregister(led);
- kfree(led);
- }
- }
-
-}
-
-static int wacom_battery_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct wacom_data *wdata = container_of(psy,
- struct wacom_data, battery);
- int ret = 0;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = 1;
- break;
- case POWER_SUPPLY_PROP_SCOPE:
- val->intval = POWER_SUPPLY_SCOPE_DEVICE;
- break;
- case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = wdata->battery_capacity;
- break;
- case POWER_SUPPLY_PROP_STATUS:
- if (wdata->bat_charging)
- val->intval = POWER_SUPPLY_STATUS_CHARGING;
- else
- if (wdata->battery_capacity == 100 && wdata->ps_connected)
- val->intval = POWER_SUPPLY_STATUS_FULL;
- else
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static int wacom_ac_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct wacom_data *wdata = container_of(psy, struct wacom_data, ac);
- int ret = 0;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_PRESENT:
- /* fall through */
- case POWER_SUPPLY_PROP_ONLINE:
- val->intval = wdata->ps_connected;
- break;
- case POWER_SUPPLY_PROP_SCOPE:
- val->intval = POWER_SUPPLY_SCOPE_DEVICE;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static void wacom_set_features(struct hid_device *hdev, u8 speed)
-{
- struct wacom_data *wdata = hid_get_drvdata(hdev);
- int limit, ret;
- __u8 rep_data[2];
-
- switch (hdev->product) {
- case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
- rep_data[0] = 0x03 ; rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
- } while (ret < 0 && limit-- > 0);
-
- if (ret >= 0) {
- if (speed == 0)
- rep_data[0] = 0x05;
- else
- rep_data[0] = 0x06;
-
- rep_data[1] = 0x00;
- limit = 3;
- do {
- ret = hid_hw_raw_request(hdev, rep_data[0],
- rep_data, 2, HID_FEATURE_REPORT,
- HID_REQ_SET_REPORT);
- } while (ret < 0 && limit-- > 0);
-
- if (ret >= 0) {
- wdata->high_speed = speed;
- return;
- }
- }
-
- /*
- * Note that if the raw queries fail, it's not a hard failure
- * and it is safe to continue
- */
- hid_warn(hdev, "failed to poke device, command %d, err %d\n",
- rep_data[0], ret);
- break;
- case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
- if (speed == 1)
- wdata->features &= ~0x20;
- else
- wdata->features |= 0x20;
-
- rep_data[0] = 0x03;
- rep_data[1] = wdata->features;
-
- ret = hid_hw_raw_request(hdev, rep_data[0], rep_data, 2,
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
- if (ret >= 0)
- wdata->high_speed = speed;
- break;
- }
-
- return;
-}
-
-static ssize_t wacom_show_speed(struct device *dev,
- struct device_attribute
- *attr, char *buf)
-{
- struct wacom_data *wdata = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%i\n", wdata->high_speed);
-}
-
-static ssize_t wacom_store_speed(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hid_device *hdev = container_of(dev, struct hid_device, dev);
- int new_speed;
-
- if (sscanf(buf, "%1d", &new_speed ) != 1)
- return -EINVAL;
-
- if (new_speed == 0 || new_speed == 1) {
- wacom_set_features(hdev, new_speed);
- return strnlen(buf, PAGE_SIZE);
- } else
- return -EINVAL;
-}
-
-static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
- wacom_show_speed, wacom_store_speed);
-
-#define WACOM_STORE(OLED_ID) \
-static ssize_t wacom_oled##OLED_ID##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct hid_device *hdev = container_of(dev, struct hid_device, \
- dev); \
- \
- if (count != 256) \
- return -EINVAL; \
- \
- wacom_set_image(hdev, buf, OLED_ID); \
- \
- return count; \
-} \
- \
-static DEVICE_ATTR(oled##OLED_ID##_img, S_IWUSR | S_IWGRP, NULL, \
- wacom_oled##OLED_ID##_store)
-
-WACOM_STORE(0);
-WACOM_STORE(1);
-WACOM_STORE(2);
-WACOM_STORE(3);
-WACOM_STORE(4);
-WACOM_STORE(5);
-WACOM_STORE(6);
-WACOM_STORE(7);
-
-static int wacom_gr_parse_report(struct hid_device *hdev,
- struct wacom_data *wdata,
- struct input_dev *input, unsigned char *data)
-{
- int tool, x, y, rw;
-
- tool = 0;
- /* Get X & Y positions */
- x = le16_to_cpu(*(__le16 *) &data[2]);
- y = le16_to_cpu(*(__le16 *) &data[4]);
-
- /* Get current tool identifier */
- if (data[1] & 0x90) { /* If pen is in the in/active area */
- switch ((data[1] >> 5) & 3) {
- case 0: /* Pen */
- tool = BTN_TOOL_PEN;
- break;
-
- case 1: /* Rubber */
- tool = BTN_TOOL_RUBBER;
- break;
-
- case 2: /* Mouse with wheel */
- case 3: /* Mouse without wheel */
- tool = BTN_TOOL_MOUSE;
- break;
- }
-
- /* Reset tool if out of active tablet area */
- if (!(data[1] & 0x10))
- tool = 0;
- }
-
- /* If tool changed, notify input subsystem */
- if (wdata->tool != tool) {
- if (wdata->tool) {
- /* Completely reset old tool state */
- if (wdata->tool == BTN_TOOL_MOUSE) {
- input_report_key(input, BTN_LEFT, 0);
- input_report_key(input, BTN_RIGHT, 0);
- input_report_key(input, BTN_MIDDLE, 0);
- input_report_abs(input, ABS_DISTANCE,
- input_abs_get_max(input, ABS_DISTANCE));
- } else {
- input_report_key(input, BTN_TOUCH, 0);
- input_report_key(input, BTN_STYLUS, 0);
- input_report_key(input, BTN_STYLUS2, 0);
- input_report_abs(input, ABS_PRESSURE, 0);
- }
- input_report_key(input, wdata->tool, 0);
- input_sync(input);
- }
- wdata->tool = tool;
- if (tool)
- input_report_key(input, tool, 1);
- }
-
- if (tool) {
- input_report_abs(input, ABS_X, x);
- input_report_abs(input, ABS_Y, y);
-
- switch ((data[1] >> 5) & 3) {
- case 2: /* Mouse with wheel */
- input_report_key(input, BTN_MIDDLE, data[1] & 0x04);
- rw = (data[6] & 0x01) ? -1 :
- (data[6] & 0x02) ? 1 : 0;
- input_report_rel(input, REL_WHEEL, rw);
- /* fall through */
-
- case 3: /* Mouse without wheel */
- input_report_key(input, BTN_LEFT, data[1] & 0x01);
- input_report_key(input, BTN_RIGHT, data[1] & 0x02);
- /* Compute distance between mouse and tablet */
- rw = 44 - (data[6] >> 2);
- if (rw < 0)
- rw = 0;
- else if (rw > 31)
- rw = 31;
- input_report_abs(input, ABS_DISTANCE, rw);
- break;
-
- default:
- input_report_abs(input, ABS_PRESSURE,
- data[6] | (((__u16) (data[1] & 0x08)) << 5));
- input_report_key(input, BTN_TOUCH, data[1] & 0x01);
- input_report_key(input, BTN_STYLUS, data[1] & 0x02);
- input_report_key(input, BTN_STYLUS2, (tool == BTN_TOOL_PEN) && data[1] & 0x04);
- break;
- }
-
- input_sync(input);
- }
-
- /* Report the state of the two buttons at the top of the tablet
- * as two extra fingerpad keys (buttons 4 & 5). */
- rw = data[7] & 0x03;
- if (rw != wdata->butstate) {
- wdata->butstate = rw;
- input_report_key(input, BTN_0, rw & 0x02);
- input_report_key(input, BTN_1, rw & 0x01);
- input_report_key(input, BTN_TOOL_FINGER, 0xf0);
- input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
- input_sync(input);
- }
-
- /* Store current battery capacity and power supply state*/
- rw = (data[7] >> 2 & 0x07);
- if (rw != wdata->power_raw) {
- wdata->power_raw = rw;
- wdata->battery_capacity = batcap_gr[rw];
- if (rw == 7)
- wdata->ps_connected = 1;
- else
- wdata->ps_connected = 0;
- }
- return 1;
-}
-
-static void wacom_i4_parse_button_report(struct wacom_data *wdata,
- struct input_dev *input, unsigned char *data)
-{
- __u16 new_butstate;
- __u8 new_whlstate;
- __u8 sync = 0;
-
- new_whlstate = data[1];
- if (new_whlstate != wdata->whlstate) {
- wdata->whlstate = new_whlstate;
- if (new_whlstate & 0x80) {
- input_report_key(input, BTN_TOUCH, 1);
- input_report_abs(input, ABS_WHEEL, (new_whlstate & 0x7f));
- input_report_key(input, BTN_TOOL_FINGER, 1);
- } else {
- input_report_key(input, BTN_TOUCH, 0);
- input_report_abs(input, ABS_WHEEL, 0);
- input_report_key(input, BTN_TOOL_FINGER, 0);
- }
- sync = 1;
- }
-
- new_butstate = (data[3] << 1) | (data[2] & 0x01);
- if (new_butstate != wdata->butstate) {
- wdata->butstate = new_butstate;
- input_report_key(input, BTN_0, new_butstate & 0x001);
- input_report_key(input, BTN_1, new_butstate & 0x002);
- input_report_key(input, BTN_2, new_butstate & 0x004);
- input_report_key(input, BTN_3, new_butstate & 0x008);
- input_report_key(input, BTN_4, new_butstate & 0x010);
- input_report_key(input, BTN_5, new_butstate & 0x020);
- input_report_key(input, BTN_6, new_butstate & 0x040);
- input_report_key(input, BTN_7, new_butstate & 0x080);
- input_report_key(input, BTN_8, new_butstate & 0x100);
- input_report_key(input, BTN_TOOL_FINGER, 1);
- sync = 1;
- }
-
- if (sync) {
- input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
- input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff);
- input_sync(input);
- }
-}
-
-static void wacom_i4_parse_pen_report(struct wacom_data *wdata,
- struct input_dev *input, unsigned char *data)
-{
- __u16 x, y, pressure;
- __u8 distance;
- __u8 tilt_x, tilt_y;
-
- switch (data[1]) {
- case 0x80: /* Out of proximity report */
- input_report_key(input, BTN_TOUCH, 0);
- input_report_abs(input, ABS_PRESSURE, 0);
- input_report_key(input, BTN_STYLUS, 0);
- input_report_key(input, BTN_STYLUS2, 0);
- input_report_key(input, wdata->tool, 0);
- input_report_abs(input, ABS_MISC, 0);
- input_event(input, EV_MSC, MSC_SERIAL, wdata->serial);
- wdata->tool = 0;
- input_sync(input);
- break;
- case 0xC2: /* Tool report */
- wdata->id = ((data[2] << 4) | (data[3] >> 4) |
- ((data[7] & 0x0f) << 20) |
- ((data[8] & 0xf0) << 12));
- wdata->serial = ((data[3] & 0x0f) << 28) +
- (data[4] << 20) + (data[5] << 12) +
- (data[6] << 4) + (data[7] >> 4);
-
- switch (wdata->id) {
- case 0x100802:
- wdata->tool = BTN_TOOL_PEN;
- break;
- case 0x10080A:
- wdata->tool = BTN_TOOL_RUBBER;
- break;
- }
- break;
- default: /* Position/pressure report */
- x = data[2] << 9 | data[3] << 1 | ((data[9] & 0x02) >> 1);
- y = data[4] << 9 | data[5] << 1 | (data[9] & 0x01);
- pressure = (data[6] << 3) | ((data[7] & 0xC0) >> 5)
- | (data[1] & 0x01);
- distance = (data[9] >> 2) & 0x3f;
- tilt_x = ((data[7] << 1) & 0x7e) | (data[8] >> 7);
- tilt_y = data[8] & 0x7f;
-
- input_report_key(input, BTN_TOUCH, pressure > 1);
-
- input_report_key(input, BTN_STYLUS, data[1] & 0x02);
- input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
- input_report_key(input, wdata->tool, 1);
- input_report_abs(input, ABS_X, x);
- input_report_abs(input, ABS_Y, y);
- input_report_abs(input, ABS_PRESSURE, pressure);
- input_report_abs(input, ABS_DISTANCE, distance);
- input_report_abs(input, ABS_TILT_X, tilt_x);
- input_report_abs(input, ABS_TILT_Y, tilt_y);
- input_report_abs(input, ABS_MISC, wdata->id);
- input_event(input, EV_MSC, MSC_SERIAL, wdata->serial);
- input_report_key(input, wdata->tool, 1);
- input_sync(input);
- break;
- }
-
- return;
-}
-
-static void wacom_i4_parse_report(struct hid_device *hdev,
- struct wacom_data *wdata,
- struct input_dev *input, unsigned char *data)
-{
- switch (data[0]) {
- case 0x00: /* Empty report */
- break;
- case 0x02: /* Pen report */
- wacom_i4_parse_pen_report(wdata, input, data);
- break;
- case 0x03: /* Features Report */
- wdata->features = data[2];
- break;
- case 0x0C: /* Button report */
- wacom_i4_parse_button_report(wdata, input, data);
- break;
- default:
- hid_err(hdev, "Unknown report: %d,%d\n", data[0], data[1]);
- break;
- }
-}
-
-static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
- u8 *raw_data, int size)
-{
- struct wacom_data *wdata = hid_get_drvdata(hdev);
- struct hid_input *hidinput;
- struct input_dev *input;
- unsigned char *data = (unsigned char *) raw_data;
- int i;
- __u8 power_raw;
-
- if (!(hdev->claimed & HID_CLAIMED_INPUT))
- return 0;
-
- hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
- input = hidinput->input;
-
- switch (hdev->product) {
- case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
- if (data[0] == 0x03) {
- return wacom_gr_parse_report(hdev, wdata, input, data);
- } else {
- hid_err(hdev, "Unknown report: %d,%d size:%d\n",
- data[0], data[1], size);
- return 0;
- }
- break;
- case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
- i = 1;
-
- switch (data[0]) {
- case 0x04:
- wacom_i4_parse_report(hdev, wdata, input, data + i);
- i += 10;
- /* fall through */
- case 0x03:
- wacom_i4_parse_report(hdev, wdata, input, data + i);
- i += 10;
- wacom_i4_parse_report(hdev, wdata, input, data + i);
- power_raw = data[i+10];
- if (power_raw != wdata->power_raw) {
- wdata->power_raw = power_raw;
- wdata->battery_capacity = batcap_i4[power_raw & 0x07];
- wdata->bat_charging = (power_raw & 0x08) ? 1 : 0;
- wdata->ps_connected = (power_raw & 0x10) ? 1 : 0;
- }
-
- break;
- default:
- hid_err(hdev, "Unknown report: %d,%d size:%d\n",
- data[0], data[1], size);
- return 0;
- }
- }
- return 1;
-}
-
-static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage, unsigned long **bit,
- int *max)
-{
- struct input_dev *input = hi->input;
-
- __set_bit(INPUT_PROP_POINTER, input->propbit);
-
- /* Basics */
- input->evbit[0] |= BIT(EV_KEY) | BIT(EV_ABS) | BIT(EV_REL);
-
- __set_bit(REL_WHEEL, input->relbit);
-
- __set_bit(BTN_TOOL_PEN, input->keybit);
- __set_bit(BTN_TOUCH, input->keybit);
- __set_bit(BTN_STYLUS, input->keybit);
- __set_bit(BTN_STYLUS2, input->keybit);
- __set_bit(BTN_LEFT, input->keybit);
- __set_bit(BTN_RIGHT, input->keybit);
- __set_bit(BTN_MIDDLE, input->keybit);
-
- /* Pad */
- input_set_capability(input, EV_MSC, MSC_SERIAL);
-
- __set_bit(BTN_0, input->keybit);
- __set_bit(BTN_1, input->keybit);
- __set_bit(BTN_TOOL_FINGER, input->keybit);
-
- /* Distance, rubber and mouse */
- __set_bit(BTN_TOOL_RUBBER, input->keybit);
- __set_bit(BTN_TOOL_MOUSE, input->keybit);
-
- switch (hdev->product) {
- case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
- input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
- input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
- input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
- break;
- case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
- __set_bit(ABS_WHEEL, input->absbit);
- __set_bit(ABS_MISC, input->absbit);
- __set_bit(BTN_2, input->keybit);
- __set_bit(BTN_3, input->keybit);
- __set_bit(BTN_4, input->keybit);
- __set_bit(BTN_5, input->keybit);
- __set_bit(BTN_6, input->keybit);
- __set_bit(BTN_7, input->keybit);
- __set_bit(BTN_8, input->keybit);
- input_set_abs_params(input, ABS_WHEEL, 0, 71, 0, 0);
- input_set_abs_params(input, ABS_X, 0, 40640, 4, 0);
- input_set_abs_params(input, ABS_Y, 0, 25400, 4, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, 2047, 0, 0);
- input_set_abs_params(input, ABS_DISTANCE, 0, 63, 0, 0);
- input_set_abs_params(input, ABS_TILT_X, 0, 127, 0, 0);
- input_set_abs_params(input, ABS_TILT_Y, 0, 127, 0, 0);
- break;
- }
-
- return 0;
-}
-
-static int wacom_probe(struct hid_device *hdev,
- const struct hid_device_id *id)
-{
- struct wacom_data *wdata;
- int ret;
-
- wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
- if (wdata == NULL) {
- hid_err(hdev, "can't alloc wacom descriptor\n");
- return -ENOMEM;
- }
-
- hid_set_drvdata(hdev, wdata);
-
- /* Parse the HID report now */
- ret = hid_parse(hdev);
- if (ret) {
- hid_err(hdev, "parse failed\n");
- goto err_free;
- }
-
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
- if (ret) {
- hid_err(hdev, "hw start failed\n");
- goto err_free;
- }
-
- ret = device_create_file(&hdev->dev, &dev_attr_speed);
- if (ret)
- hid_warn(hdev,
- "can't create sysfs speed attribute err: %d\n", ret);
-
-#define OLED_INIT(OLED_ID) \
- do { \
- ret = device_create_file(&hdev->dev, \
- &dev_attr_oled##OLED_ID##_img); \
- if (ret) \
- hid_warn(hdev, \
- "can't create sysfs oled attribute, err: %d\n", ret);\
- } while (0)
-
-OLED_INIT(0);
-OLED_INIT(1);
-OLED_INIT(2);
-OLED_INIT(3);
-OLED_INIT(4);
-OLED_INIT(5);
-OLED_INIT(6);
-OLED_INIT(7);
-
- wdata->features = 0;
- wacom_set_features(hdev, 1);
-
- if (hdev->product == USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) {
- sprintf(hdev->name, "%s", "Wacom Intuos4 WL");
- ret = wacom_initialize_leds(hdev);
- if (ret)
- hid_warn(hdev,
- "can't create led attribute, err: %d\n", ret);
- }
-
- wdata->battery.properties = wacom_battery_props;
- wdata->battery.num_properties = ARRAY_SIZE(wacom_battery_props);
- wdata->battery.get_property = wacom_battery_get_property;
- wdata->battery.name = "wacom_battery";
- wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
- wdata->battery.use_for_apm = 0;
-
-
- ret = power_supply_register(&hdev->dev, &wdata->battery);
- if (ret) {
- hid_err(hdev, "can't create sysfs battery attribute, err: %d\n",
- ret);
- goto err_battery;
- }
-
- power_supply_powers(&wdata->battery, &hdev->dev);
-
- wdata->ac.properties = wacom_ac_props;
- wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
- wdata->ac.get_property = wacom_ac_get_property;
- wdata->ac.name = "wacom_ac";
- wdata->ac.type = POWER_SUPPLY_TYPE_MAINS;
- wdata->ac.use_for_apm = 0;
-
- ret = power_supply_register(&hdev->dev, &wdata->ac);
- if (ret) {
- hid_err(hdev,
- "can't create ac battery attribute, err: %d\n", ret);
- goto err_ac;
- }
-
- power_supply_powers(&wdata->ac, &hdev->dev);
- return 0;
-
-err_ac:
- power_supply_unregister(&wdata->battery);
-err_battery:
- wacom_destroy_leds(hdev);
- device_remove_file(&hdev->dev, &dev_attr_oled0_img);
- device_remove_file(&hdev->dev, &dev_attr_oled1_img);
- device_remove_file(&hdev->dev, &dev_attr_oled2_img);
- device_remove_file(&hdev->dev, &dev_attr_oled3_img);
- device_remove_file(&hdev->dev, &dev_attr_oled4_img);
- device_remove_file(&hdev->dev, &dev_attr_oled5_img);
- device_remove_file(&hdev->dev, &dev_attr_oled6_img);
- device_remove_file(&hdev->dev, &dev_attr_oled7_img);
- device_remove_file(&hdev->dev, &dev_attr_speed);
- hid_hw_stop(hdev);
-err_free:
- kfree(wdata);
- return ret;
-}
-
-static void wacom_remove(struct hid_device *hdev)
-{
- struct wacom_data *wdata = hid_get_drvdata(hdev);
-
- wacom_destroy_leds(hdev);
- device_remove_file(&hdev->dev, &dev_attr_oled0_img);
- device_remove_file(&hdev->dev, &dev_attr_oled1_img);
- device_remove_file(&hdev->dev, &dev_attr_oled2_img);
- device_remove_file(&hdev->dev, &dev_attr_oled3_img);
- device_remove_file(&hdev->dev, &dev_attr_oled4_img);
- device_remove_file(&hdev->dev, &dev_attr_oled5_img);
- device_remove_file(&hdev->dev, &dev_attr_oled6_img);
- device_remove_file(&hdev->dev, &dev_attr_oled7_img);
- device_remove_file(&hdev->dev, &dev_attr_speed);
- hid_hw_stop(hdev);
-
- power_supply_unregister(&wdata->battery);
- power_supply_unregister(&wdata->ac);
- kfree(hid_get_drvdata(hdev));
-}
-
-static const struct hid_device_id wacom_devices[] = {
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
-
- { }
-};
-MODULE_DEVICE_TABLE(hid, wacom_devices);
-
-static struct hid_driver wacom_driver = {
- .name = "wacom",
- .id_table = wacom_devices,
- .probe = wacom_probe,
- .remove = wacom_remove,
- .raw_event = wacom_raw_event,
- .input_mapped = wacom_input_mapped,
-};
-module_hid_driver(wacom_driver);
-
-MODULE_DESCRIPTION("Driver for Wacom Graphire Bluetooth and Wacom Intuos4 WL");
-MODULE_LICENSE("GPL");
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 21aafc8f48c8..747d54421e73 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -1054,21 +1054,29 @@ static int i2c_hid_remove(struct i2c_client *client)
static int i2c_hid_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ struct hid_device *hid = ihid->hid;
+ int ret = 0;
disable_irq(client->irq);
if (device_may_wakeup(&client->dev))
enable_irq_wake(client->irq);
+ if (hid->driver && hid->driver->suspend)
+ ret = hid->driver->suspend(hid, PMSG_SUSPEND);
+
/* Save some power */
i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
- return 0;
+ return ret;
}
static int i2c_hid_resume(struct device *dev)
{
int ret;
struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+ struct hid_device *hid = ihid->hid;
enable_irq(client->irq);
ret = i2c_hid_hwreset(client);
@@ -1078,6 +1086,11 @@ static int i2c_hid_resume(struct device *dev)
if (device_may_wakeup(&client->dev))
disable_irq_wake(client->irq);
+ if (hid->driver && hid->driver->reset_resume) {
+ ret = hid->driver->reset_resume(hid);
+ return ret;
+ }
+
return 0;
}
#endif
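
With this change i2c-hid forwards suspend and resume to the bound HID driver when it implements the optional hooks. A hedged sketch of the driver side; the callback names and bodies are illustrative, only the .suspend/.reset_resume signatures come from struct hid_driver:

#include <linux/hid.h>

#ifdef CONFIG_PM
static int example_hid_suspend(struct hid_device *hdev, pm_message_t message)
{
	/* e.g. cancel work and quiesce LEDs before the bus sleeps the device */
	return 0;
}

static int example_hid_reset_resume(struct hid_device *hdev)
{
	/* e.g. re-send feature reports that were lost across the reset */
	return 0;
}
#endif

static struct hid_driver example_driver = {
	.name		= "example",
	/* .id_table, .probe and friends omitted from this sketch */
#ifdef CONFIG_PM
	.suspend	= example_hid_suspend,
	.reset_resume	= example_hid_reset_resume,
#endif
};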
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 7b88f4cb9902..79cf503e37bf 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -58,7 +58,7 @@ module_param_named(ignoreled, ignoreled, uint, 0644);
MODULE_PARM_DESC(ignoreled, "Autosuspend with active leds");
/* Quirks specified at module load time */
-static char *quirks_param[MAX_USBHID_BOOT_QUIRKS] = { [ 0 ... (MAX_USBHID_BOOT_QUIRKS - 1) ] = NULL };
+static char *quirks_param[MAX_USBHID_BOOT_QUIRKS];
module_param_array_named(quirks, quirks_param, charp, NULL, 0444);
MODULE_PARM_DESC(quirks, "Add/modify USB HID quirks by specifying "
" quirks=vendorID:productID:quirks"
@@ -536,7 +536,8 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
int head;
struct usbhid_device *usbhid = hid->driver_data;
- if ((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN)
+ if (((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN) ||
+ test_bit(HID_DISCONNECTED, &usbhid->iofl))
return;
if (usbhid->urbout && dir == USB_DIR_OUT && report->type == HID_OUTPUT_REPORT) {
@@ -1366,6 +1367,9 @@ static void usbhid_disconnect(struct usb_interface *intf)
return;
usbhid = hid->driver_data;
+ spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
+ set_bit(HID_DISCONNECTED, &usbhid->iofl);
+ spin_unlock_irq(&usbhid->lock);
hid_destroy_device(hid);
kfree(usbhid);
}
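
The usbhid fix marks the device disconnected under the driver lock and then refuses new submissions, so nothing is queued against hardware that is already gone. The same pattern in a generic, self-contained form (all names made up):

#include <linux/bitops.h>
#include <linux/spinlock.h>

#define EXAMPLE_DISCONNECTED	0	/* bit number in iofl */

struct example_dev {
	spinlock_t lock;
	unsigned long iofl;
};

static bool example_submit(struct example_dev *edev)
{
	/* refuse to queue new I/O once the device has been unplugged */
	if (test_bit(EXAMPLE_DISCONNECTED, &edev->iofl))
		return false;
	/* ... submit the URB / request here ... */
	return true;
}

static void example_disconnect(struct example_dev *edev)
{
	/* take the lock so concurrent submitters observe the flag */
	spin_lock_irq(&edev->lock);
	set_bit(EXAMPLE_DISCONNECTED, &edev->iofl);
	spin_unlock_irq(&edev->lock);
}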
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 31e6727cd009..15225f3eaed1 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,7 +74,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
- { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
@@ -124,6 +124,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096, HID_QUIRK_NO_INIT_INPUT_REPORTS },
{ 0, 0 }
};
diff --git a/drivers/input/tablet/wacom.h b/drivers/hid/wacom.h
index 9ebf0ed3b3b3..64bc1b296d91 100644
--- a/drivers/input/tablet/wacom.h
+++ b/drivers/hid/wacom.h
@@ -12,6 +12,7 @@
* Copyright (c) 2001 Frederic Lepied <flepied@mandrakesoft.com>
* Copyright (c) 2004 Panagiotis Issaris <panagiotis.issaris@mech.kuleuven.ac.be>
* Copyright (c) 2002-2011 Ping Cheng <pingc@wacom.com>
+ * Copyright (c) 2014 Benjamin Tissoires <benjamin.tissoires@redhat.com>
*
* ChangeLog:
* v0.1 (vp) - Initial release
@@ -72,6 +73,8 @@
* v1.52 (pc) - Query Wacom data upon system resume
* - add defines for features->type
* - add new devices (0x9F, 0xE2, and 0XE3)
+ * v2.00 (bt) - conversion to a HID driver
+ * - integration of the Bluetooth devices
*/
/*
@@ -93,35 +96,30 @@
/*
* Version Information
*/
-#define DRIVER_VERSION "v1.53"
+#define DRIVER_VERSION "v2.00"
#define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@ucw.cz>"
#define DRIVER_DESC "USB Wacom tablet driver"
#define DRIVER_LICENSE "GPL"
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE(DRIVER_LICENSE);
-
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_VENDOR_ID_LENOVO 0x17ef
struct wacom {
- dma_addr_t data_dma;
struct usb_device *usbdev;
struct usb_interface *intf;
- struct urb *irq;
struct wacom_wac wacom_wac;
+ struct hid_device *hdev;
struct mutex lock;
struct work_struct work;
- bool open;
- char phys[32];
struct wacom_led {
u8 select[2]; /* status led selector (0..3) */
u8 llv; /* status led brightness no button (1..127) */
u8 hlv; /* status led brightness button pressed (1..127) */
u8 img_lum; /* OLED matrix display brightness */
} led;
+ bool led_initialized;
struct power_supply battery;
+ struct power_supply ac;
};
static inline void wacom_schedule_work(struct wacom_wac *wacom_wac)
@@ -130,10 +128,19 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac)
schedule_work(&wacom->work);
}
-extern const struct usb_device_id wacom_ids[];
+static inline void wacom_notify_battery(struct wacom_wac *wacom_wac)
+{
+ struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
+
+ power_supply_changed(&wacom->battery);
+}
+
+extern const struct hid_device_id wacom_ids[];
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
void wacom_setup_device_quirks(struct wacom_features *features);
int wacom_setup_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac);
+int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac);
#endif
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/hid/wacom_sys.c
index 2c613cd41dd6..3e388ec31da8 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -13,246 +13,106 @@
#include "wacom_wac.h"
#include "wacom.h"
+#include <linux/hid.h>
-/* defines to get HID report descriptor */
-#define HID_DEVICET_HID (USB_TYPE_CLASS | 0x01)
-#define HID_DEVICET_REPORT (USB_TYPE_CLASS | 0x02)
-#define HID_USAGE_UNDEFINED 0x00
-#define HID_USAGE_PAGE 0x05
-#define HID_USAGE_PAGE_DIGITIZER 0x0d
-#define HID_USAGE_PAGE_DESKTOP 0x01
-#define HID_USAGE 0x09
-#define HID_USAGE_X ((HID_USAGE_PAGE_DESKTOP << 16) | 0x30)
-#define HID_USAGE_Y ((HID_USAGE_PAGE_DESKTOP << 16) | 0x31)
-#define HID_USAGE_PRESSURE ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x30)
-#define HID_USAGE_X_TILT ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3d)
-#define HID_USAGE_Y_TILT ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x3e)
-#define HID_USAGE_FINGER ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x22)
-#define HID_USAGE_STYLUS ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x20)
-#define HID_USAGE_CONTACTMAX ((HID_USAGE_PAGE_DIGITIZER << 16) | 0x55)
-#define HID_COLLECTION 0xa1
-#define HID_COLLECTION_LOGICAL 0x02
-#define HID_COLLECTION_END 0xc0
-
-struct hid_descriptor {
- struct usb_descriptor_header header;
- __le16 bcdHID;
- u8 bCountryCode;
- u8 bNumDescriptors;
- u8 bDescriptorType;
- __le16 wDescriptorLength;
-} __attribute__ ((packed));
-
-/* defines to get/set USB message */
-#define USB_REQ_GET_REPORT 0x01
-#define USB_REQ_SET_REPORT 0x09
-
-#define WAC_HID_FEATURE_REPORT 0x03
#define WAC_MSG_RETRIES 5
#define WAC_CMD_LED_CONTROL 0x20
#define WAC_CMD_ICON_START 0x21
#define WAC_CMD_ICON_XFER 0x23
+#define WAC_CMD_ICON_BT_XFER 0x26
#define WAC_CMD_RETRIES 10
-static int wacom_get_report(struct usb_interface *intf, u8 type, u8 id,
+static int wacom_get_report(struct hid_device *hdev, u8 type, u8 id,
void *buf, size_t size, unsigned int retries)
{
- struct usb_device *dev = interface_to_usbdev(intf);
int retval;
do {
- retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- USB_REQ_GET_REPORT,
- USB_DIR_IN | USB_TYPE_CLASS |
- USB_RECIP_INTERFACE,
- (type << 8) + id,
- intf->altsetting[0].desc.bInterfaceNumber,
- buf, size, 100);
+ retval = hid_hw_raw_request(hdev, id, buf, size, type,
+ HID_REQ_GET_REPORT);
} while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries);
return retval;
}
-static int wacom_set_report(struct usb_interface *intf, u8 type, u8 id,
- void *buf, size_t size, unsigned int retries)
+static int wacom_set_report(struct hid_device *hdev, u8 type, u8 *buf,
+ size_t size, unsigned int retries)
{
- struct usb_device *dev = interface_to_usbdev(intf);
int retval;
do {
- retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
- USB_REQ_SET_REPORT,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- (type << 8) + id,
- intf->altsetting[0].desc.bInterfaceNumber,
- buf, size, 1000);
+ retval = hid_hw_raw_request(hdev, buf[0], buf, size, type,
+ HID_REQ_SET_REPORT);
} while ((retval == -ETIMEDOUT || retval == -EPIPE) && --retries);
return retval;
}
-static void wacom_sys_irq(struct urb *urb)
+static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
{
- struct wacom *wacom = urb->context;
- struct device *dev = &wacom->intf->dev;
- int retval;
+ struct wacom *wacom = hid_get_drvdata(hdev);
- switch (urb->status) {
- case 0:
- /* success */
- break;
- case -ECONNRESET:
- case -ENOENT:
- case -ESHUTDOWN:
- /* this urb is terminated, clean up */
- dev_dbg(dev, "%s - urb shutting down with status: %d\n",
- __func__, urb->status);
- return;
- default:
- dev_dbg(dev, "%s - nonzero urb status received: %d\n",
- __func__, urb->status);
- goto exit;
- }
+ if (size > WACOM_PKGLEN_MAX)
+ return 1;
+
+ memcpy(wacom->wacom_wac.data, raw_data, size);
- wacom_wac_irq(&wacom->wacom_wac, urb->actual_length);
+ wacom_wac_irq(&wacom->wacom_wac, size);
- exit:
- usb_mark_last_busy(wacom->usbdev);
- retval = usb_submit_urb(urb, GFP_ATOMIC);
- if (retval)
- dev_err(dev, "%s - usb_submit_urb failed with result %d\n",
- __func__, retval);
+ return 0;
}
static int wacom_open(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
- int retval = 0;
-
- if (usb_autopm_get_interface(wacom->intf) < 0)
- return -EIO;
+ int retval;
mutex_lock(&wacom->lock);
-
- if (usb_submit_urb(wacom->irq, GFP_KERNEL)) {
- retval = -EIO;
- goto out;
- }
-
- wacom->open = true;
- wacom->intf->needs_remote_wakeup = 1;
-
-out:
+ retval = hid_hw_open(wacom->hdev);
mutex_unlock(&wacom->lock);
- usb_autopm_put_interface(wacom->intf);
+
return retval;
}
static void wacom_close(struct input_dev *dev)
{
struct wacom *wacom = input_get_drvdata(dev);
- int autopm_error;
-
- autopm_error = usb_autopm_get_interface(wacom->intf);
mutex_lock(&wacom->lock);
- usb_kill_urb(wacom->irq);
- wacom->open = false;
- wacom->intf->needs_remote_wakeup = 0;
+ hid_hw_close(wacom->hdev);
mutex_unlock(&wacom->lock);
-
- if (!autopm_error)
- usb_autopm_put_interface(wacom->intf);
}
/*
- * Calculate the resolution of the X or Y axis, given appropriate HID data.
- * This function is little more than hidinput_calc_abs_res stripped down.
+ * Calculate the resolution of the X or Y axis using hidinput_calc_abs_res.
*/
static int wacom_calc_hid_res(int logical_extents, int physical_extents,
- unsigned char unit, unsigned char exponent)
-{
- int prev, unit_exponent;
-
- /* Check if the extents are sane */
- if (logical_extents <= 0 || physical_extents <= 0)
- return 0;
-
- /* Get signed value of nybble-sized twos-compliment exponent */
- unit_exponent = exponent;
- if (unit_exponent > 7)
- unit_exponent -= 16;
-
- /* Convert physical_extents to millimeters */
- if (unit == 0x11) { /* If centimeters */
- unit_exponent += 1;
- } else if (unit == 0x13) { /* If inches */
- prev = physical_extents;
- physical_extents *= 254;
- if (physical_extents < prev)
- return 0;
- unit_exponent -= 1;
- } else {
- return 0;
- }
-
- /* Apply negative unit exponent */
- for (; unit_exponent < 0; unit_exponent++) {
- prev = logical_extents;
- logical_extents *= 10;
- if (logical_extents < prev)
- return 0;
- }
- /* Apply positive unit exponent */
- for (; unit_exponent > 0; unit_exponent--) {
- prev = physical_extents;
- physical_extents *= 10;
- if (physical_extents < prev)
- return 0;
- }
-
- /* Calculate resolution */
- return logical_extents / physical_extents;
-}
-
-static int wacom_parse_logical_collection(unsigned char *report,
- struct wacom_features *features)
+ unsigned unit, int exponent)
{
- int length = 0;
-
- if (features->type == BAMBOO_PT) {
-
- /* Logical collection is only used by 3rd gen Bamboo Touch */
- features->pktlen = WACOM_PKGLEN_BBTOUCH3;
- features->device_type = BTN_TOOL_FINGER;
-
- features->x_max = features->y_max =
- get_unaligned_le16(&report[10]);
-
- length = 11;
- }
- return length;
+ struct hid_field field = {
+ .logical_maximum = logical_extents,
+ .physical_maximum = physical_extents,
+ .unit = unit,
+ .unit_exponent = exponent,
+ };
+
+ return hidinput_calc_abs_res(&field, ABS_X);
}
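
The removed open-coded version above spells out the same arithmetic that hidinput_calc_abs_res() performs: convert the physical extent to millimetres using the HID unit code (0x11 = cm, 0x13 = inch) and the signed 4-bit unit exponent, then divide the logical range by it. A standalone sketch of that math with made-up tablet numbers, assuming only those two unit codes:

#include <stdio.h>

/* Counts per millimetre from HID logical/physical maxima; mirrors the removed helper. */
static int calc_res(long logical, long physical, unsigned char unit, int exponent)
{
	if (logical <= 0 || physical <= 0)
		return 0;

	if (exponent > 7)		/* sign-extend the 4-bit twos-complement exponent */
		exponent -= 16;

	if (unit == 0x11) {		/* centimetres -> millimetres */
		exponent += 1;
	} else if (unit == 0x13) {	/* inches -> millimetres (x 25.4) */
		physical *= 254;
		exponent -= 1;
	} else {
		return 0;
	}

	for (; exponent < 0; exponent++)
		logical *= 10;
	for (; exponent > 0; exponent--)
		physical *= 10;

	return (int)(logical / physical);
}

int main(void)
{
	/* hypothetical pen interface: 21648 counts over 21.7 inches */
	printf("%d counts/mm\n", calc_res(21648, 217, 0x13, -1));
	return 0;
}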
-static void wacom_retrieve_report_data(struct usb_interface *intf,
- struct wacom_features *features)
+static void wacom_feature_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
{
- int result = 0;
- unsigned char *rep_data;
-
- rep_data = kmalloc(2, GFP_KERNEL);
- if (rep_data) {
-
- rep_data[0] = 12;
- result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
- rep_data[0], rep_data, 2,
- WAC_MSG_RETRIES);
-
- if (result >= 0 && rep_data[1] > 2)
- features->touch_max = rep_data[1];
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_features *features = &wacom->wacom_wac.features;
- kfree(rep_data);
+ switch (usage->hid) {
+ case HID_DG_CONTACTMAX:
+ /* leave touch_max as is if predefined */
+ if (!features->touch_max)
+ features->touch_max = field->value[0];
+ break;
}
}
@@ -285,243 +145,100 @@ static void wacom_retrieve_report_data(struct usb_interface *intf,
* interfaces haven't supported pressure or distance, this is enough
* information to override invalid values in the wacom_features table.
*
- * 3rd gen Bamboo Touch no longer define a Digitizer-Finger Pysical
- * Collection. Instead they define a Logical Collection with a single
- * Logical Maximum for both X and Y.
- *
- * Intuos5 touch interface does not contain useful data. We deal with
- * this after returning from this function.
+ * Intuos5 touch interface and 3rd gen Bamboo Touch do not contain useful
+ * data. We deal with them after returning from this function.
*/
-static int wacom_parse_hid(struct usb_interface *intf,
- struct hid_descriptor *hid_desc,
- struct wacom_features *features)
+static void wacom_usage_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
{
- struct usb_device *dev = interface_to_usbdev(intf);
- char limit = 0;
- /* result has to be defined as int for some devices */
- int result = 0, touch_max = 0;
- int i = 0, page = 0, finger = 0, pen = 0;
- unsigned char *report;
-
- report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL);
- if (!report)
- return -ENOMEM;
-
- /* retrive report descriptors */
- do {
- result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- USB_REQ_GET_DESCRIPTOR,
- USB_RECIP_INTERFACE | USB_DIR_IN,
- HID_DEVICET_REPORT << 8,
- intf->altsetting[0].desc.bInterfaceNumber, /* interface */
- report,
- hid_desc->wDescriptorLength,
- 5000); /* 5 secs */
- } while (result < 0 && limit++ < WAC_MSG_RETRIES);
-
- /* No need to parse the Descriptor. It isn't an error though */
- if (result < 0)
- goto out;
-
- for (i = 0; i < hid_desc->wDescriptorLength; i++) {
-
- switch (report[i]) {
- case HID_USAGE_PAGE:
- page = report[i + 1];
- i++;
- break;
-
- case HID_USAGE:
- switch (page << 16 | report[i + 1]) {
- case HID_USAGE_X:
- if (finger) {
- features->device_type = BTN_TOOL_FINGER;
- /* touch device at least supports one touch point */
- touch_max = 1;
- switch (features->type) {
- case TABLETPC2FG:
- features->pktlen = WACOM_PKGLEN_TPC2FG;
- break;
-
- case MTSCREEN:
- case WACOM_24HDT:
- features->pktlen = WACOM_PKGLEN_MTOUCH;
- break;
-
- case MTTPC:
- case MTTPC_B:
- features->pktlen = WACOM_PKGLEN_MTTPC;
- break;
-
- case BAMBOO_PT:
- features->pktlen = WACOM_PKGLEN_BBTOUCH;
- break;
-
- default:
- features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- break;
- }
-
- switch (features->type) {
- case BAMBOO_PT:
- features->x_phy =
- get_unaligned_le16(&report[i + 5]);
- features->x_max =
- get_unaligned_le16(&report[i + 8]);
- i += 15;
- break;
-
- case WACOM_24HDT:
- features->x_max =
- get_unaligned_le16(&report[i + 3]);
- features->x_phy =
- get_unaligned_le16(&report[i + 8]);
- features->unit = report[i - 1];
- features->unitExpo = report[i - 3];
- i += 12;
- break;
-
- case MTTPC_B:
- features->x_max =
- get_unaligned_le16(&report[i + 3]);
- features->x_phy =
- get_unaligned_le16(&report[i + 6]);
- features->unit = report[i - 5];
- features->unitExpo = report[i - 3];
- i += 9;
- break;
-
- default:
- features->x_max =
- get_unaligned_le16(&report[i + 3]);
- features->x_phy =
- get_unaligned_le16(&report[i + 6]);
- features->unit = report[i + 9];
- features->unitExpo = report[i + 11];
- i += 12;
- break;
- }
- } else if (pen) {
- /* penabled only accepts exact bytes of data */
- if (features->type >= TABLETPC)
- features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- features->device_type = BTN_TOOL_PEN;
- features->x_max =
- get_unaligned_le16(&report[i + 3]);
- i += 4;
- }
- break;
-
- case HID_USAGE_Y:
- if (finger) {
- switch (features->type) {
- case TABLETPC2FG:
- case MTSCREEN:
- case MTTPC:
- features->y_max =
- get_unaligned_le16(&report[i + 3]);
- features->y_phy =
- get_unaligned_le16(&report[i + 6]);
- i += 7;
- break;
-
- case WACOM_24HDT:
- features->y_max =
- get_unaligned_le16(&report[i + 3]);
- features->y_phy =
- get_unaligned_le16(&report[i - 2]);
- i += 7;
- break;
-
- case BAMBOO_PT:
- features->y_phy =
- get_unaligned_le16(&report[i + 3]);
- features->y_max =
- get_unaligned_le16(&report[i + 6]);
- i += 12;
- break;
-
- case MTTPC_B:
- features->y_max =
- get_unaligned_le16(&report[i + 3]);
- features->y_phy =
- get_unaligned_le16(&report[i + 6]);
- i += 9;
- break;
-
- default:
- features->y_max =
- features->x_max;
- features->y_phy =
- get_unaligned_le16(&report[i + 3]);
- i += 4;
- break;
- }
- } else if (pen) {
- features->y_max =
- get_unaligned_le16(&report[i + 3]);
- i += 4;
- }
- break;
-
- case HID_USAGE_FINGER:
- finger = 1;
- i++;
- break;
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_features *features = &wacom->wacom_wac.features;
+ bool finger = (field->logical == HID_DG_FINGER) ||
+ (field->physical == HID_DG_FINGER);
+ bool pen = (field->logical == HID_DG_STYLUS) ||
+ (field->physical == HID_DG_STYLUS);
- /*
- * Requiring Stylus Usage will ignore boot mouse
- * X/Y values and some cases of invalid Digitizer X/Y
- * values commonly reported.
- */
- case HID_USAGE_STYLUS:
- pen = 1;
- i++;
- break;
+ /*
+ * Requiring Stylus Usage will ignore boot mouse
+ * X/Y values and some cases of invalid Digitizer X/Y
+ * values commonly reported.
+ */
+ if (!pen && !finger)
+ return;
- case HID_USAGE_CONTACTMAX:
- /* leave touch_max as is if predefined */
- if (!features->touch_max)
- wacom_retrieve_report_data(intf, features);
- i++;
- break;
+ if (finger && !features->touch_max)
+ /* touch device at least supports one touch point */
+ features->touch_max = 1;
- case HID_USAGE_PRESSURE:
- if (pen) {
- features->pressure_max =
- get_unaligned_le16(&report[i + 3]);
- i += 4;
- }
- break;
+ switch (usage->hid) {
+ case HID_GD_X:
+ features->x_max = field->logical_maximum;
+ if (finger) {
+ features->device_type = BTN_TOOL_FINGER;
+ features->x_phy = field->physical_maximum;
+ if (features->type != BAMBOO_PT) {
+ features->unit = field->unit;
+ features->unitExpo = field->unit_exponent;
}
- break;
-
- case HID_COLLECTION_END:
- /* reset UsagePage and Finger */
- finger = page = 0;
- break;
+ } else {
+ features->device_type = BTN_TOOL_PEN;
+ }
+ break;
+ case HID_GD_Y:
+ features->y_max = field->logical_maximum;
+ if (finger) {
+ features->y_phy = field->physical_maximum;
+ if (features->type != BAMBOO_PT) {
+ features->unit = field->unit;
+ features->unitExpo = field->unit_exponent;
+ }
+ }
+ break;
+ case HID_DG_TIPPRESSURE:
+ if (pen)
+ features->pressure_max = field->logical_maximum;
+ break;
+ }
+}
- case HID_COLLECTION:
- i++;
- switch (report[i]) {
- case HID_COLLECTION_LOGICAL:
- i += wacom_parse_logical_collection(&report[i],
- features);
- break;
+static void wacom_parse_hid(struct hid_device *hdev,
+ struct wacom_features *features)
+{
+ struct hid_report_enum *rep_enum;
+ struct hid_report *hreport;
+ int i, j;
+
+ /* check features first */
+ rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(hreport, &rep_enum->report_list, list) {
+ for (i = 0; i < hreport->maxfield; i++) {
+ /* Ignore if report count is out of bounds. */
+ if (hreport->field[i]->report_count < 1)
+ continue;
+
+ for (j = 0; j < hreport->field[i]->maxusage; j++) {
+ wacom_feature_mapping(hdev, hreport->field[i],
+ hreport->field[i]->usage + j);
}
- break;
}
}
- out:
- if (!features->touch_max && touch_max)
- features->touch_max = touch_max;
- result = 0;
- kfree(report);
- return result;
+ /* now check the input usages */
+ rep_enum = &hdev->report_enum[HID_INPUT_REPORT];
+ list_for_each_entry(hreport, &rep_enum->report_list, list) {
+
+ if (!hreport->maxfield)
+ continue;
+
+ for (i = 0; i < hreport->maxfield; i++)
+ for (j = 0; j < hreport->field[i]->maxusage; j++)
+ wacom_usage_mapping(hdev, hreport->field[i],
+ hreport->field[i]->usage + j);
+ }
}
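
The new parser leans on hid_parse() having already built the report structures: each report enum holds a list of reports, each report an array of fields, each field an array of usages. A simplified, self-contained model of that three-level traversal (the struct layouts here are invented stand-ins, not the real struct hid_report):

#include <stdio.h>

struct usage { unsigned hid; };				/* stand-in for struct hid_usage */
struct field { int maxusage; struct usage *usage; };	/* stand-in for struct hid_field */
struct report { int maxfield; struct field **field; };	/* stand-in for struct hid_report */

static void visit(struct report *r, void (*cb)(struct usage *))
{
	int i, j;

	for (i = 0; i < r->maxfield; i++)
		for (j = 0; j < r->field[i]->maxusage; j++)
			cb(&r->field[i]->usage[j]);
}

static void print_usage(struct usage *u)
{
	printf("usage 0x%06x\n", u->hid);
}

int main(void)
{
	struct usage u[2] = { { 0x010030 }, { 0x010031 } };	/* GD_X, GD_Y */
	struct field f = { 2, u };
	struct field *fields[] = { &f };
	struct report r = { 1, fields };

	visit(&r, print_usage);
	return 0;
}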
-static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int length, int mode)
+static int wacom_set_device_mode(struct hid_device *hdev, int report_id,
+ int length, int mode)
{
unsigned char *rep_data;
int error = -ENOMEM, limit = 0;
@@ -534,8 +251,11 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int
rep_data[0] = report_id;
rep_data[1] = mode;
- error = wacom_set_report(intf, WAC_HID_FEATURE_REPORT,
- report_id, rep_data, length, 1);
+ error = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data,
+ length, 1);
+ if (error >= 0)
+ error = wacom_get_report(hdev, HID_FEATURE_REPORT,
+ report_id, rep_data, length, 1);
} while ((error < 0 || rep_data[1] != mode) && limit++ < WAC_MSG_RETRIES);
kfree(rep_data);
@@ -543,6 +263,59 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int
return error < 0 ? error : 0;
}
+static int wacom_bt_query_tablet_data(struct hid_device *hdev, u8 speed,
+ struct wacom_features *features)
+{
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ int ret;
+ u8 rep_data[2];
+
+ switch (features->type) {
+ case GRAPHIRE_BT:
+ rep_data[0] = 0x03;
+ rep_data[1] = 0x00;
+ ret = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, 2,
+ 3);
+
+ if (ret >= 0) {
+ rep_data[0] = speed == 0 ? 0x05 : 0x06;
+ rep_data[1] = 0x00;
+
+ ret = wacom_set_report(hdev, HID_FEATURE_REPORT,
+ rep_data, 2, 3);
+
+ if (ret >= 0) {
+ wacom->wacom_wac.bt_high_speed = speed;
+ return 0;
+ }
+ }
+
+ /*
+ * Note that if the raw queries fail, it's not a hard failure
+ * and it is safe to continue
+ */
+ hid_warn(hdev, "failed to poke device, command %d, err %d\n",
+ rep_data[0], ret);
+ break;
+ case INTUOS4WL:
+ if (speed == 1)
+ wacom->wacom_wac.bt_features &= ~0x20;
+ else
+ wacom->wacom_wac.bt_features |= 0x20;
+
+ rep_data[0] = 0x03;
+ rep_data[1] = wacom->wacom_wac.bt_features;
+
+ ret = wacom_set_report(hdev, HID_FEATURE_REPORT, rep_data, 2,
+ 1);
+ if (ret >= 0)
+ wacom->wacom_wac.bt_high_speed = speed;
+ break;
+ }
+
+ return 0;
+}
+
/*
* Switch the tablet into its most-capable mode. Wacom tablets are
* typically configured to power-up in a mode which sends mouse-like
@@ -550,31 +323,34 @@ static int wacom_set_device_mode(struct usb_interface *intf, int report_id, int
* from the tablet, it is necessary to switch the tablet out of this
* mode and into one which sends the full range of tablet data.
*/
-static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_features *features)
+static int wacom_query_tablet_data(struct hid_device *hdev,
+ struct wacom_features *features)
{
+ if (hdev->bus == BUS_BLUETOOTH)
+ return wacom_bt_query_tablet_data(hdev, 1, features);
+
if (features->device_type == BTN_TOOL_FINGER) {
if (features->type > TABLETPC) {
/* MT Tablet PC touch */
- return wacom_set_device_mode(intf, 3, 4, 4);
+ return wacom_set_device_mode(hdev, 3, 4, 4);
}
else if (features->type == WACOM_24HDT || features->type == CINTIQ_HYBRID) {
- return wacom_set_device_mode(intf, 18, 3, 2);
+ return wacom_set_device_mode(hdev, 18, 3, 2);
}
} else if (features->device_type == BTN_TOOL_PEN) {
if (features->type <= BAMBOO_PT && features->type != WIRELESS) {
- return wacom_set_device_mode(intf, 2, 2, 2);
+ return wacom_set_device_mode(hdev, 2, 2, 2);
}
}
return 0;
}
-static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
+static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
struct wacom_features *features)
{
- int error = 0;
- struct usb_host_interface *interface = intf->cur_altsetting;
- struct hid_descriptor *hid_desc;
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct usb_interface *intf = wacom->intf;
/* default features */
features->device_type = BTN_TOOL_PEN;
@@ -599,66 +375,54 @@ static int wacom_retrieve_hid_descriptor(struct usb_interface *intf,
}
/* only devices that support touch need to retrieve the info */
- if (features->type < BAMBOO_PT) {
- goto out;
- }
-
- error = usb_get_extra_descriptor(interface, HID_DEVICET_HID, &hid_desc);
- if (error) {
- error = usb_get_extra_descriptor(&interface->endpoint[0],
- HID_DEVICET_REPORT, &hid_desc);
- if (error) {
- dev_err(&intf->dev,
- "can not retrieve extra class descriptor\n");
- goto out;
- }
- }
- error = wacom_parse_hid(intf, hid_desc, features);
+ if (features->type < BAMBOO_PT)
+ return;
- out:
- return error;
+ wacom_parse_hid(hdev, features);
}
-struct wacom_usbdev_data {
+struct wacom_hdev_data {
struct list_head list;
struct kref kref;
- struct usb_device *dev;
+ struct hid_device *dev;
struct wacom_shared shared;
};
static LIST_HEAD(wacom_udev_list);
static DEFINE_MUTEX(wacom_udev_list_lock);
-static struct usb_device *wacom_get_sibling(struct usb_device *dev, int vendor, int product)
+static bool wacom_are_sibling(struct hid_device *hdev,
+ struct hid_device *sibling)
{
- int port1;
- struct usb_device *sibling;
-
- if (vendor == 0 && product == 0)
- return dev;
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_features *features = &wacom->wacom_wac.features;
+ int vid = features->oVid;
+ int pid = features->oPid;
+ int n1, n2;
- if (dev->parent == NULL)
- return NULL;
+ if (vid == 0 && pid == 0) {
+ vid = hdev->vendor;
+ pid = hdev->product;
+ }
- usb_hub_for_each_child(dev->parent, port1, sibling) {
- struct usb_device_descriptor *d;
- if (sibling == NULL)
- continue;
+ if (vid != sibling->vendor || pid != sibling->product)
+ return false;
- d = &sibling->descriptor;
- if (d->idVendor == vendor && d->idProduct == product)
- return sibling;
- }
+ /* Compare the physical path. */
+ n1 = strrchr(hdev->phys, '.') - hdev->phys;
+ n2 = strrchr(sibling->phys, '.') - sibling->phys;
+ if (n1 != n2 || n1 <= 0 || n2 <= 0)
+ return false;
- return NULL;
+ return !strncmp(hdev->phys, sibling->phys, n1);
}
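
Sibling detection no longer walks the USB topology; it compares the HID phys strings up to their last '.' so the pen and touch interfaces of one tablet end up sharing data. A standalone sketch of that prefix comparison with hypothetical phys strings:

#include <stdio.h>
#include <string.h>

/* True when both phys paths match up to the position of their last '.'. */
static int same_parent(const char *a, const char *b)
{
	const char *pa = strrchr(a, '.');
	const char *pb = strrchr(b, '.');
	int n1, n2;

	if (!pa || !pb)
		return 0;
	n1 = (int)(pa - a);
	n2 = (int)(pb - b);
	if (n1 != n2 || n1 <= 0)
		return 0;
	return !strncmp(a, b, (size_t)n1);
}

int main(void)
{
	/* hypothetical phys strings for two interfaces of one USB tablet */
	printf("%d\n", same_parent("usb-0000:00:14.0-5/input0",
				   "usb-0000:00:14.0-5/input1"));
	return 0;
}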
-static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev)
+static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
{
- struct wacom_usbdev_data *data;
+ struct wacom_hdev_data *data;
list_for_each_entry(data, &wacom_udev_list, list) {
- if (data->dev == dev) {
+ if (wacom_are_sibling(hdev, data->dev)) {
kref_get(&data->kref);
return data;
}
@@ -667,28 +431,29 @@ static struct wacom_usbdev_data *wacom_get_usbdev_data(struct usb_device *dev)
return NULL;
}
-static int wacom_add_shared_data(struct wacom_wac *wacom,
- struct usb_device *dev)
+static int wacom_add_shared_data(struct hid_device *hdev)
{
- struct wacom_usbdev_data *data;
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct wacom_hdev_data *data;
int retval = 0;
mutex_lock(&wacom_udev_list_lock);
- data = wacom_get_usbdev_data(dev);
+ data = wacom_get_hdev_data(hdev);
if (!data) {
- data = kzalloc(sizeof(struct wacom_usbdev_data), GFP_KERNEL);
+ data = kzalloc(sizeof(struct wacom_hdev_data), GFP_KERNEL);
if (!data) {
retval = -ENOMEM;
goto out;
}
kref_init(&data->kref);
- data->dev = dev;
+ data->dev = hdev;
list_add_tail(&data->list, &wacom_udev_list);
}
- wacom->shared = &data->shared;
+ wacom_wac->shared = &data->shared;
out:
mutex_unlock(&wacom_udev_list_lock);
@@ -697,8 +462,8 @@ out:
static void wacom_release_shared_data(struct kref *kref)
{
- struct wacom_usbdev_data *data =
- container_of(kref, struct wacom_usbdev_data, kref);
+ struct wacom_hdev_data *data =
+ container_of(kref, struct wacom_hdev_data, kref);
mutex_lock(&wacom_udev_list_lock);
list_del(&data->list);
@@ -709,10 +474,10 @@ static void wacom_release_shared_data(struct kref *kref)
static void wacom_remove_shared_data(struct wacom_wac *wacom)
{
- struct wacom_usbdev_data *data;
+ struct wacom_hdev_data *data;
if (wacom->shared) {
- data = container_of(wacom->shared, struct wacom_usbdev_data, shared);
+ data = container_of(wacom->shared, struct wacom_hdev_data, shared);
kref_put(&data->kref, wacom_release_shared_data);
wacom->shared = NULL;
}
@@ -755,38 +520,40 @@ static int wacom_led_control(struct wacom *wacom)
buf[4] = wacom->led.img_lum;
}
- retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_LED_CONTROL,
- buf, 9, WAC_CMD_RETRIES);
+ retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 9,
+ WAC_CMD_RETRIES);
kfree(buf);
return retval;
}
-static int wacom_led_putimage(struct wacom *wacom, int button_id, const void *img)
+static int wacom_led_putimage(struct wacom *wacom, int button_id, u8 xfer_id,
+ const unsigned len, const void *img)
{
unsigned char *buf;
int i, retval;
+ const unsigned chunk_len = len / 4; /* the image is sent in 4 chunks */
- buf = kzalloc(259, GFP_KERNEL);
+ buf = kzalloc(chunk_len + 3, GFP_KERNEL);
if (!buf)
return -ENOMEM;
/* Send 'start' command */
buf[0] = WAC_CMD_ICON_START;
buf[1] = 1;
- retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_ICON_START,
- buf, 2, WAC_CMD_RETRIES);
+ retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 2,
+ WAC_CMD_RETRIES);
if (retval < 0)
goto out;
- buf[0] = WAC_CMD_ICON_XFER;
+ buf[0] = xfer_id;
buf[1] = button_id & 0x07;
for (i = 0; i < 4; i++) {
buf[2] = i;
- memcpy(buf + 3, img + i * 256, 256);
+ memcpy(buf + 3, img + i * chunk_len, chunk_len);
- retval = wacom_set_report(wacom->intf, 0x03, WAC_CMD_ICON_XFER,
- buf, 259, WAC_CMD_RETRIES);
+ retval = wacom_set_report(wacom->hdev, HID_FEATURE_REPORT,
+ buf, chunk_len + 3, WAC_CMD_RETRIES);
if (retval < 0)
break;
}
@@ -794,8 +561,8 @@ static int wacom_led_putimage(struct wacom *wacom, int button_id, const void *im
/* Send 'stop' */
buf[0] = WAC_CMD_ICON_START;
buf[1] = 0;
- wacom_set_report(wacom->intf, 0x03, WAC_CMD_ICON_START,
- buf, 2, WAC_CMD_RETRIES);
+ wacom_set_report(wacom->hdev, HID_FEATURE_REPORT, buf, 2,
+ WAC_CMD_RETRIES);
out:
kfree(buf);
@@ -805,7 +572,8 @@ out:
static ssize_t wacom_led_select_store(struct device *dev, int set_id,
const char *buf, size_t count)
{
- struct wacom *wacom = dev_get_drvdata(dev);
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct wacom *wacom = hid_get_drvdata(hdev);
unsigned int id;
int err;
@@ -832,7 +600,8 @@ static ssize_t wacom_led##SET_ID##_select_store(struct device *dev, \
static ssize_t wacom_led##SET_ID##_select_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- struct wacom *wacom = dev_get_drvdata(dev); \
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);\
+ struct wacom *wacom = hid_get_drvdata(hdev); \
return snprintf(buf, 2, "%d\n", wacom->led.select[SET_ID]); \
} \
static DEVICE_ATTR(status_led##SET_ID##_select, S_IWUSR | S_IRUSR, \
@@ -866,7 +635,8 @@ static ssize_t wacom_luminance_store(struct wacom *wacom, u8 *dest,
static ssize_t wacom_##name##_luminance_store(struct device *dev, \
struct device_attribute *attr, const char *buf, size_t count) \
{ \
- struct wacom *wacom = dev_get_drvdata(dev); \
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);\
+ struct wacom *wacom = hid_get_drvdata(hdev); \
\
return wacom_luminance_store(wacom, &wacom->led.field, \
buf, count); \
@@ -881,15 +651,26 @@ DEVICE_LUMINANCE_ATTR(buttons, img_lum);
static ssize_t wacom_button_image_store(struct device *dev, int button_id,
const char *buf, size_t count)
{
- struct wacom *wacom = dev_get_drvdata(dev);
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct wacom *wacom = hid_get_drvdata(hdev);
int err;
+ unsigned len;
+ u8 xfer_id;
+
+ if (hdev->bus == BUS_BLUETOOTH) {
+ len = 256;
+ xfer_id = WAC_CMD_ICON_BT_XFER;
+ } else {
+ len = 1024;
+ xfer_id = WAC_CMD_ICON_XFER;
+ }
- if (count != 1024)
+ if (count != len)
return -EINVAL;
mutex_lock(&wacom->lock);
- err = wacom_led_putimage(wacom, button_id, buf);
+ err = wacom_led_putimage(wacom, button_id, xfer_id, len, buf);
mutex_unlock(&wacom->lock);
@@ -965,13 +746,14 @@ static int wacom_initialize_leds(struct wacom *wacom)
switch (wacom->wacom_wac.features.type) {
case INTUOS4S:
case INTUOS4:
+ case INTUOS4WL:
case INTUOS4L:
wacom->led.select[0] = 0;
wacom->led.select[1] = 0;
wacom->led.llv = 10;
wacom->led.hlv = 20;
wacom->led.img_lum = 10;
- error = sysfs_create_group(&wacom->intf->dev.kobj,
+ error = sysfs_create_group(&wacom->hdev->dev.kobj,
&intuos4_led_attr_group);
break;
@@ -983,7 +765,7 @@ static int wacom_initialize_leds(struct wacom *wacom)
wacom->led.hlv = 0;
wacom->led.img_lum = 0;
- error = sysfs_create_group(&wacom->intf->dev.kobj,
+ error = sysfs_create_group(&wacom->hdev->dev.kobj,
&cintiq_led_attr_group);
break;
@@ -1000,7 +782,7 @@ static int wacom_initialize_leds(struct wacom *wacom)
wacom->led.hlv = 0;
wacom->led.img_lum = 0;
- error = sysfs_create_group(&wacom->intf->dev.kobj,
+ error = sysfs_create_group(&wacom->hdev->dev.kobj,
&intuos5_led_attr_group);
} else
return 0;
@@ -1011,28 +793,35 @@ static int wacom_initialize_leds(struct wacom *wacom)
}
if (error) {
- dev_err(&wacom->intf->dev,
+ hid_err(wacom->hdev,
"cannot create sysfs group err: %d\n", error);
return error;
}
wacom_led_control(wacom);
+ wacom->led_initialized = true;
return 0;
}
static void wacom_destroy_leds(struct wacom *wacom)
{
+ if (!wacom->led_initialized)
+ return;
+
+ wacom->led_initialized = false;
+
switch (wacom->wacom_wac.features.type) {
case INTUOS4S:
case INTUOS4:
+ case INTUOS4WL:
case INTUOS4L:
- sysfs_remove_group(&wacom->intf->dev.kobj,
+ sysfs_remove_group(&wacom->hdev->dev.kobj,
&intuos4_led_attr_group);
break;
case WACOM_24HD:
case WACOM_21UX2:
- sysfs_remove_group(&wacom->intf->dev.kobj,
+ sysfs_remove_group(&wacom->hdev->dev.kobj,
&cintiq_led_attr_group);
break;
@@ -1043,17 +832,24 @@ static void wacom_destroy_leds(struct wacom *wacom)
case INTUOSPM:
case INTUOSPL:
if (wacom->wacom_wac.features.device_type == BTN_TOOL_PEN)
- sysfs_remove_group(&wacom->intf->dev.kobj,
+ sysfs_remove_group(&wacom->hdev->dev.kobj,
&intuos5_led_attr_group);
break;
}
}
static enum power_supply_property wacom_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_CAPACITY
};
+static enum power_supply_property wacom_ac_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_SCOPE,
+};
+
static int wacom_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -1067,7 +863,16 @@ static int wacom_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CAPACITY:
val->intval =
- wacom->wacom_wac.battery_capacity * 100 / 31;
+ wacom->wacom_wac.battery_capacity;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (wacom->wacom_wac.bat_charging)
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (wacom->wacom_wac.battery_capacity == 100 &&
+ wacom->wacom_wac.ps_connected)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
default:
ret = -EINVAL;
@@ -1077,74 +882,201 @@ static int wacom_battery_get_property(struct power_supply *psy,
return ret;
}
+static int wacom_ac_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct wacom *wacom = container_of(psy, struct wacom, ac);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ /* fall through */
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = wacom->wacom_wac.ps_connected;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
static int wacom_initialize_battery(struct wacom *wacom)
{
- int error = 0;
+ static atomic_t battery_no = ATOMIC_INIT(0);
+ int error;
+ unsigned long n;
+
+ if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) {
+ n = atomic_inc_return(&battery_no) - 1;
- if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_MONITOR) {
wacom->battery.properties = wacom_battery_props;
wacom->battery.num_properties = ARRAY_SIZE(wacom_battery_props);
wacom->battery.get_property = wacom_battery_get_property;
- wacom->battery.name = "wacom_battery";
+ sprintf(wacom->wacom_wac.bat_name, "wacom_battery_%ld", n);
+ wacom->battery.name = wacom->wacom_wac.bat_name;
wacom->battery.type = POWER_SUPPLY_TYPE_BATTERY;
wacom->battery.use_for_apm = 0;
- error = power_supply_register(&wacom->usbdev->dev,
+ wacom->ac.properties = wacom_ac_props;
+ wacom->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
+ wacom->ac.get_property = wacom_ac_get_property;
+ sprintf(wacom->wacom_wac.ac_name, "wacom_ac_%ld", n);
+ wacom->ac.name = wacom->wacom_wac.ac_name;
+ wacom->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ wacom->ac.use_for_apm = 0;
+
+ error = power_supply_register(&wacom->hdev->dev,
&wacom->battery);
+ if (error)
+ return error;
+
+ power_supply_powers(&wacom->battery, &wacom->hdev->dev);
- if (!error)
- power_supply_powers(&wacom->battery,
- &wacom->usbdev->dev);
+ error = power_supply_register(&wacom->hdev->dev, &wacom->ac);
+ if (error) {
+ power_supply_unregister(&wacom->battery);
+ return error;
+ }
+
+ power_supply_powers(&wacom->ac, &wacom->hdev->dev);
}
- return error;
+ return 0;
}
static void wacom_destroy_battery(struct wacom *wacom)
{
- if (wacom->wacom_wac.features.quirks & WACOM_QUIRK_MONITOR &&
- wacom->battery.dev) {
+ if ((wacom->wacom_wac.features.quirks & WACOM_QUIRK_BATTERY) &&
+ wacom->battery.dev) {
power_supply_unregister(&wacom->battery);
wacom->battery.dev = NULL;
+ power_supply_unregister(&wacom->ac);
+ wacom->ac.dev = NULL;
}
}
-static int wacom_register_input(struct wacom *wacom)
+static ssize_t wacom_show_speed(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct wacom *wacom = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%i\n", wacom->wacom_wac.bt_high_speed);
+}
+
+static ssize_t wacom_store_speed(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct wacom *wacom = hid_get_drvdata(hdev);
+ u8 new_speed;
+
+ if (kstrtou8(buf, 0, &new_speed))
+ return -EINVAL;
+
+ if (new_speed != 0 && new_speed != 1)
+ return -EINVAL;
+
+ wacom_bt_query_tablet_data(hdev, new_speed, &wacom->wacom_wac.features);
+
+ return count;
+}
+
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
+ wacom_show_speed, wacom_store_speed);
+
+static struct input_dev *wacom_allocate_input(struct wacom *wacom)
{
struct input_dev *input_dev;
- struct usb_interface *intf = wacom->intf;
- struct usb_device *dev = interface_to_usbdev(intf);
+ struct hid_device *hdev = wacom->hdev;
struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
- int error;
input_dev = input_allocate_device();
- if (!input_dev) {
- error = -ENOMEM;
- goto fail1;
- }
+ if (!input_dev)
+ return NULL;
input_dev->name = wacom_wac->name;
- input_dev->dev.parent = &intf->dev;
+ input_dev->phys = hdev->phys;
+ input_dev->dev.parent = &hdev->dev;
input_dev->open = wacom_open;
input_dev->close = wacom_close;
- usb_to_input_id(dev, &input_dev->id);
+ input_dev->uniq = hdev->uniq;
+ input_dev->id.bustype = hdev->bus;
+ input_dev->id.vendor = hdev->vendor;
+ input_dev->id.product = hdev->product;
+ input_dev->id.version = hdev->version;
input_set_drvdata(input_dev, wacom);
+ return input_dev;
+}
+
+static void wacom_unregister_inputs(struct wacom *wacom)
+{
+ if (wacom->wacom_wac.input)
+ input_unregister_device(wacom->wacom_wac.input);
+ if (wacom->wacom_wac.pad_input)
+ input_unregister_device(wacom->wacom_wac.pad_input);
+ wacom->wacom_wac.input = NULL;
+ wacom->wacom_wac.pad_input = NULL;
+}
+
+static int wacom_register_inputs(struct wacom *wacom)
+{
+ struct input_dev *input_dev, *pad_input_dev;
+ struct wacom_wac *wacom_wac = &(wacom->wacom_wac);
+ int error;
+
+ input_dev = wacom_allocate_input(wacom);
+ pad_input_dev = wacom_allocate_input(wacom);
+ if (!input_dev || !pad_input_dev) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+
wacom_wac->input = input_dev;
+ wacom_wac->pad_input = pad_input_dev;
+ wacom_wac->pad_input->name = wacom_wac->pad_name;
+
error = wacom_setup_input_capabilities(input_dev, wacom_wac);
if (error)
- goto fail1;
+ goto fail2;
error = input_register_device(input_dev);
if (error)
goto fail2;
+ error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
+ if (error) {
+ /* no pad in use on this interface */
+ input_free_device(pad_input_dev);
+ wacom_wac->pad_input = NULL;
+ pad_input_dev = NULL;
+ } else {
+ error = input_register_device(pad_input_dev);
+ if (error)
+ goto fail3;
+ }
+
return 0;
+fail3:
+ input_unregister_device(input_dev);
+ input_dev = NULL;
fail2:
- input_free_device(input_dev);
wacom_wac->input = NULL;
+ wacom_wac->pad_input = NULL;
fail1:
+ if (input_dev)
+ input_free_device(input_dev);
+ if (pad_input_dev)
+ input_free_device(pad_input_dev);
return error;
}
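
wacom_register_inputs() uses the usual goto-based unwind: each failure label tears down exactly what was set up before it, in reverse order. A compact, self-contained illustration of the idiom with dummy steps (nothing here is driver or kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Dummy setup step standing in for an allocate/register call. */
static int step(const char *name, int fail)
{
	printf("%s %s\n", name, fail ? "failed" : "ok");
	return fail ? -1 : 0;
}

static int setup_all(void)
{
	int err;

	err = step("alloc A", 0);
	if (err)
		goto fail1;
	err = step("register A", 0);
	if (err)
		goto fail2;
	err = step("register B", 1);	/* simulate a late failure */
	if (err)
		goto fail3;
	return 0;

fail3:
	step("unregister A", 0);
fail2:
	step("free A", 0);
fail1:
	return err;
}

int main(void)
{
	return setup_all() ? EXIT_FAILURE : EXIT_SUCCESS;
}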
@@ -1153,6 +1085,7 @@ static void wacom_wireless_work(struct work_struct *work)
struct wacom *wacom = container_of(work, struct wacom, work);
struct usb_device *usbdev = wacom->usbdev;
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+ struct hid_device *hdev1, *hdev2;
struct wacom *wacom1, *wacom2;
struct wacom_wac *wacom_wac1, *wacom_wac2;
int error;
@@ -1165,50 +1098,49 @@ static void wacom_wireless_work(struct work_struct *work)
wacom_destroy_battery(wacom);
/* Stylus interface */
- wacom1 = usb_get_intfdata(usbdev->config->interface[1]);
+ hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
+ wacom1 = hid_get_drvdata(hdev1);
wacom_wac1 = &(wacom1->wacom_wac);
- if (wacom_wac1->input)
- input_unregister_device(wacom_wac1->input);
- wacom_wac1->input = NULL;
+ wacom_unregister_inputs(wacom1);
/* Touch interface */
- wacom2 = usb_get_intfdata(usbdev->config->interface[2]);
+ hdev2 = usb_get_intfdata(usbdev->config->interface[2]);
+ wacom2 = hid_get_drvdata(hdev2);
wacom_wac2 = &(wacom2->wacom_wac);
- if (wacom_wac2->input)
- input_unregister_device(wacom_wac2->input);
- wacom_wac2->input = NULL;
+ wacom_unregister_inputs(wacom2);
if (wacom_wac->pid == 0) {
- dev_info(&wacom->intf->dev, "wireless tablet disconnected\n");
+ hid_info(wacom->hdev, "wireless tablet disconnected\n");
+ wacom_wac1->shared->type = 0;
} else {
- const struct usb_device_id *id = wacom_ids;
+ const struct hid_device_id *id = wacom_ids;
- dev_info(&wacom->intf->dev,
- "wireless tablet connected with PID %x\n",
+ hid_info(wacom->hdev, "wireless tablet connected with PID %x\n",
wacom_wac->pid);
- while (id->match_flags) {
- if (id->idVendor == USB_VENDOR_ID_WACOM &&
- id->idProduct == wacom_wac->pid)
+ while (id->bus) {
+ if (id->vendor == USB_VENDOR_ID_WACOM &&
+ id->product == wacom_wac->pid)
break;
id++;
}
- if (!id->match_flags) {
- dev_info(&wacom->intf->dev,
- "ignoring unknown PID.\n");
+ if (!id->bus) {
+ hid_info(wacom->hdev, "ignoring unknown PID.\n");
return;
}
/* Stylus interface */
wacom_wac1->features =
- *((struct wacom_features *)id->driver_info);
+ *((struct wacom_features *)id->driver_data);
wacom_wac1->features.device_type = BTN_TOOL_PEN;
snprintf(wacom_wac1->name, WACOM_NAME_MAX, "%s (WL) Pen",
wacom_wac1->features.name);
+ snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
+ wacom_wac1->features.name);
wacom_wac1->shared->touch_max = wacom_wac1->features.touch_max;
wacom_wac1->shared->type = wacom_wac1->features.type;
- error = wacom_register_input(wacom1);
+ error = wacom_register_inputs(wacom1);
if (error)
goto fail;
@@ -1216,7 +1148,7 @@ static void wacom_wireless_work(struct work_struct *work)
if (wacom_wac1->features.touch_max ||
wacom_wac1->features.type == INTUOSHT) {
wacom_wac2->features =
- *((struct wacom_features *)id->driver_info);
+ *((struct wacom_features *)id->driver_data);
wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
wacom_wac2->features.device_type = BTN_TOOL_FINGER;
wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
@@ -1226,7 +1158,9 @@ static void wacom_wireless_work(struct work_struct *work)
else
snprintf(wacom_wac2->name, WACOM_NAME_MAX,
"%s (WL) Pad",wacom_wac2->features.name);
- error = wacom_register_input(wacom2);
+ snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
+ "%s (WL) Pad", wacom_wac2->features.name);
+ error = wacom_register_inputs(wacom2);
if (error)
goto fail;
@@ -1243,15 +1177,8 @@ static void wacom_wireless_work(struct work_struct *work)
return;
fail:
- if (wacom_wac2->input) {
- input_unregister_device(wacom_wac2->input);
- wacom_wac2->input = NULL;
- }
-
- if (wacom_wac1->input) {
- input_unregister_device(wacom_wac1->input);
- wacom_wac1->input = NULL;
- }
+ wacom_unregister_inputs(wacom1);
+ wacom_unregister_inputs(wacom2);
return;
}
@@ -1282,69 +1209,89 @@ static void wacom_calculate_res(struct wacom_features *features)
features->unitExpo);
}
-static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int wacom_hid_report_len(struct hid_report *report)
+{
+ /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+ return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+}
+
+static size_t wacom_compute_pktlen(struct hid_device *hdev)
+{
+ struct hid_report_enum *report_enum;
+ struct hid_report *report;
+ size_t size = 0;
+
+ report_enum = hdev->report_enum + HID_INPUT_REPORT;
+
+ list_for_each_entry(report, &report_enum->report_list, list) {
+ size_t report_size = wacom_hid_report_len(report);
+ if (report_size > size)
+ size = report_size;
+ }
+
+ return size;
+}
+
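+
A note on the two helpers above: wacom_hid_report_len() rounds the report size in bits up to whole bytes and adds one byte when the report carries an ID; wacom_compute_pktlen() then takes the maximum over all input reports. A quick standalone check of that arithmetic with made-up report sizes:

#include <stdio.h>

/* bytes = ceil(bits / 8) + (1 if the report carries an ID) */
static int report_len(unsigned size_bits, unsigned id)
{
	return ((size_bits - 1) >> 3) + 1 + (id > 0);
}

int main(void)
{
	/* hypothetical reports: 64 bits without an ID, 70 bits with ID 2 */
	printf("%d %d\n", report_len(64, 0), report_len(70, 2));	/* prints: 8 10 */
	return 0;
}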
+static int wacom_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
struct usb_device *dev = interface_to_usbdev(intf);
- struct usb_endpoint_descriptor *endpoint;
struct wacom *wacom;
struct wacom_wac *wacom_wac;
struct wacom_features *features;
int error;
- if (!id->driver_info)
+ if (!id->driver_data)
return -EINVAL;
wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
if (!wacom)
return -ENOMEM;
+ hid_set_drvdata(hdev, wacom);
+ wacom->hdev = hdev;
+
+ /* ask for the report descriptor to be loaded by HID */
+ error = hid_parse(hdev);
+ if (error) {
+ hid_err(hdev, "parse failed\n");
+ goto fail1;
+ }
+
wacom_wac = &wacom->wacom_wac;
- wacom_wac->features = *((struct wacom_features *)id->driver_info);
+ wacom_wac->features = *((struct wacom_features *)id->driver_data);
features = &wacom_wac->features;
+ features->pktlen = wacom_compute_pktlen(hdev);
if (features->pktlen > WACOM_PKGLEN_MAX) {
error = -EINVAL;
goto fail1;
}
- wacom_wac->data = usb_alloc_coherent(dev, WACOM_PKGLEN_MAX,
- GFP_KERNEL, &wacom->data_dma);
- if (!wacom_wac->data) {
- error = -ENOMEM;
+ if (features->check_for_hid_type && features->hid_type != hdev->type) {
+ error = -ENODEV;
goto fail1;
}
- wacom->irq = usb_alloc_urb(0, GFP_KERNEL);
- if (!wacom->irq) {
- error = -ENOMEM;
- goto fail2;
- }
-
wacom->usbdev = dev;
wacom->intf = intf;
mutex_init(&wacom->lock);
INIT_WORK(&wacom->work, wacom_wireless_work);
- usb_make_path(dev, wacom->phys, sizeof(wacom->phys));
- strlcat(wacom->phys, "/input0", sizeof(wacom->phys));
-
- endpoint = &intf->cur_altsetting->endpoint[0].desc;
/* set the default size in case we do not get them from hid */
wacom_set_default_phy(features);
/* Retrieve the physical and logical size for touch devices */
- error = wacom_retrieve_hid_descriptor(intf, features);
- if (error)
- goto fail3;
+ wacom_retrieve_hid_descriptor(hdev, features);
/*
* Intuos5 has no useful data about its touch interface in its
- * HID descriptor. If this is the touch interface (wMaxPacketSize
+ * HID descriptor. If this is the touch interface (PacketSize
* of WACOM_PKGLEN_BBTOUCH3), override the table values.
*/
if (features->type >= INTUOS5S && features->type <= INTUOSHT) {
- if (endpoint->wMaxPacketSize == WACOM_PKGLEN_BBTOUCH3) {
+ if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
features->device_type = BTN_TOOL_FINGER;
- features->pktlen = WACOM_PKGLEN_BBTOUCH3;
features->x_max = 4096;
features->y_max = 4096;
@@ -1353,20 +1300,35 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
}
}
+ /*
+ * Same thing for Bamboo 3rd gen.
+ */
+ if ((features->type == BAMBOO_PT) &&
+ (features->pktlen == WACOM_PKGLEN_BBTOUCH3) &&
+ (features->device_type == BTN_TOOL_PEN)) {
+ features->device_type = BTN_TOOL_FINGER;
+
+ features->x_max = 4096;
+ features->y_max = 4096;
+ }
+
+ if (hdev->bus == BUS_BLUETOOTH)
+ features->quirks |= WACOM_QUIRK_BATTERY;
+
wacom_setup_device_quirks(features);
/* set unit to "100th of a mm" for devices not reported by HID */
if (!features->unit) {
features->unit = 0x11;
- features->unitExpo = 16 - 3;
+ features->unitExpo = -3;
}
wacom_calculate_res(features);
strlcpy(wacom_wac->name, features->name, sizeof(wacom_wac->name));
+ snprintf(wacom_wac->pad_name, sizeof(wacom_wac->pad_name),
+ "%s Pad", features->name);
if (features->quirks & WACOM_QUIRK_MULTI_INPUT) {
- struct usb_device *other_dev;
-
/* Append the device type to the name */
if (features->device_type != BTN_TOOL_FINGER)
strlcat(wacom_wac->name, " Pen", WACOM_NAME_MAX);
@@ -1375,43 +1337,49 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
else
strlcat(wacom_wac->name, " Pad", WACOM_NAME_MAX);
- other_dev = wacom_get_sibling(dev, features->oVid, features->oPid);
- if (other_dev == NULL || wacom_get_usbdev_data(other_dev) == NULL)
- other_dev = dev;
- error = wacom_add_shared_data(wacom_wac, other_dev);
+ error = wacom_add_shared_data(hdev);
if (error)
- goto fail3;
+ goto fail1;
}
- usb_fill_int_urb(wacom->irq, dev,
- usb_rcvintpipe(dev, endpoint->bEndpointAddress),
- wacom_wac->data, features->pktlen,
- wacom_sys_irq, wacom, endpoint->bInterval);
- wacom->irq->transfer_dma = wacom->data_dma;
- wacom->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
error = wacom_initialize_leds(wacom);
if (error)
- goto fail4;
+ goto fail2;
+
+ if (!(features->quirks & WACOM_QUIRK_MONITOR) &&
+ (features->quirks & WACOM_QUIRK_BATTERY)) {
+ error = wacom_initialize_battery(wacom);
+ if (error)
+ goto fail3;
+ }
if (!(features->quirks & WACOM_QUIRK_NO_INPUT)) {
- error = wacom_register_input(wacom);
+ error = wacom_register_inputs(wacom);
if (error)
- goto fail5;
+ goto fail4;
}
- /* Note that if query fails it is not a hard failure */
- wacom_query_tablet_data(intf, features);
+ if (hdev->bus == BUS_BLUETOOTH) {
+ error = device_create_file(&hdev->dev, &dev_attr_speed);
+ if (error)
+ hid_warn(hdev,
+ "can't create sysfs speed attribute err: %d\n",
+ error);
+ }
- usb_set_intfdata(intf, wacom);
+ /* Note that if query fails it is not a hard failure */
+ wacom_query_tablet_data(hdev, features);
- if (features->quirks & WACOM_QUIRK_MONITOR) {
- if (usb_submit_urb(wacom->irq, GFP_KERNEL)) {
- error = -EIO;
- goto fail5;
- }
+ /* Regular HID work starts now */
+ error = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (error) {
+ hid_err(hdev, "hw start failed\n");
+ goto fail5;
}
+ if (features->quirks & WACOM_QUIRK_MONITOR)
+ error = hid_hw_open(hdev);
+
if (wacom_wac->features.type == INTUOSHT && wacom_wac->features.touch_max) {
if (wacom_wac->features.device_type == BTN_TOOL_FINGER)
wacom_wac->shared->touch_input = wacom_wac->input;
@@ -1419,79 +1387,70 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
return 0;
- fail5: wacom_destroy_leds(wacom);
- fail4: wacom_remove_shared_data(wacom_wac);
- fail3: usb_free_urb(wacom->irq);
- fail2: usb_free_coherent(dev, WACOM_PKGLEN_MAX, wacom_wac->data, wacom->data_dma);
+ fail5: if (hdev->bus == BUS_BLUETOOTH)
+ device_remove_file(&hdev->dev, &dev_attr_speed);
+ wacom_unregister_inputs(wacom);
+ fail4: wacom_destroy_battery(wacom);
+ fail3: wacom_destroy_leds(wacom);
+ fail2: wacom_remove_shared_data(wacom_wac);
fail1: kfree(wacom);
+ hid_set_drvdata(hdev, NULL);
return error;
}
-static void wacom_disconnect(struct usb_interface *intf)
+static void wacom_remove(struct hid_device *hdev)
{
- struct wacom *wacom = usb_get_intfdata(intf);
+ struct wacom *wacom = hid_get_drvdata(hdev);
- usb_set_intfdata(intf, NULL);
+ hid_hw_stop(hdev);
- usb_kill_urb(wacom->irq);
cancel_work_sync(&wacom->work);
- if (wacom->wacom_wac.input)
- input_unregister_device(wacom->wacom_wac.input);
+ wacom_unregister_inputs(wacom);
+ if (hdev->bus == BUS_BLUETOOTH)
+ device_remove_file(&hdev->dev, &dev_attr_speed);
wacom_destroy_battery(wacom);
wacom_destroy_leds(wacom);
- usb_free_urb(wacom->irq);
- usb_free_coherent(interface_to_usbdev(intf), WACOM_PKGLEN_MAX,
- wacom->wacom_wac.data, wacom->data_dma);
wacom_remove_shared_data(&wacom->wacom_wac);
- kfree(wacom);
-}
-
-static int wacom_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct wacom *wacom = usb_get_intfdata(intf);
-
- mutex_lock(&wacom->lock);
- usb_kill_urb(wacom->irq);
- mutex_unlock(&wacom->lock);
- return 0;
+ hid_set_drvdata(hdev, NULL);
+ kfree(wacom);
}
-static int wacom_resume(struct usb_interface *intf)
+static int wacom_resume(struct hid_device *hdev)
{
- struct wacom *wacom = usb_get_intfdata(intf);
+ struct wacom *wacom = hid_get_drvdata(hdev);
struct wacom_features *features = &wacom->wacom_wac.features;
- int rv = 0;
mutex_lock(&wacom->lock);
/* switch to wacom mode first */
- wacom_query_tablet_data(intf, features);
+ wacom_query_tablet_data(hdev, features);
wacom_led_control(wacom);
- if ((wacom->open || (features->quirks & WACOM_QUIRK_MONITOR)) &&
- usb_submit_urb(wacom->irq, GFP_NOIO) < 0)
- rv = -EIO;
-
mutex_unlock(&wacom->lock);
- return rv;
+ return 0;
}
-static int wacom_reset_resume(struct usb_interface *intf)
+static int wacom_reset_resume(struct hid_device *hdev)
{
- return wacom_resume(intf);
+ return wacom_resume(hdev);
}
-static struct usb_driver wacom_driver = {
+static struct hid_driver wacom_driver = {
.name = "wacom",
.id_table = wacom_ids,
.probe = wacom_probe,
- .disconnect = wacom_disconnect,
- .suspend = wacom_suspend,
+ .remove = wacom_remove,
+#ifdef CONFIG_PM
.resume = wacom_resume,
.reset_resume = wacom_reset_resume,
- .supports_autosuspend = 1,
+#endif
+ .raw_event = wacom_raw_event,
};
+module_hid_driver(wacom_driver);
-module_usb_driver(wacom_driver);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE(DRIVER_LICENSE);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/hid/wacom_wac.c
index e73cf2c71f35..aa6a08eb7ad6 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -25,11 +25,23 @@
#define WACOM_INTUOS_RES 100
#define WACOM_INTUOS3_RES 200
-/* Scale factor relating reported contact size to logical contact area.
+/*
+ * Scale factor relating reported contact size to logical contact area.
* 2^14/pi is a good approximation on Intuos5 and 3rd-gen Bamboo
*/
#define WACOM_CONTACT_AREA_SCALE 2607
+/*
+ * Percent of battery capacity for Graphire.
+ * 8th value means AC online and show 100% capacity.
+ */
+static unsigned short batcap_gr[8] = { 1, 15, 25, 35, 50, 70, 100, 100 };
+
+/*
+ * Percent of battery capacity for Intuos4 WL; AC state has a separate bit.
+ */
+static unsigned short batcap_i4[8] = { 1, 15, 30, 45, 60, 70, 85, 100 };
+
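
These tables map the 3-bit battery level reported by the tablet to a percentage; for the Intuos4 WL the charging and AC bits sit above that field (bits 3 and 4, as decoded in wacom_intuos_bt_irq() below). A standalone decode of a hypothetical power byte using the same table:

#include <stdio.h>

static unsigned short batcap_i4[8] = { 1, 15, 30, 45, 60, 70, 85, 100 };

int main(void)
{
	unsigned char power_raw = 0x0d;	/* made-up sample: level 5, charging bit set */
	unsigned capacity = batcap_i4[power_raw & 0x07];
	unsigned charging = (power_raw & 0x08) ? 1 : 0;
	unsigned ac_online = (power_raw & 0x10) ? 1 : 0;

	printf("capacity=%u%% charging=%u ac=%u\n", capacity, charging, ac_online);
	return 0;
}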
static int wacom_penpartner_irq(struct wacom_wac *wacom)
{
unsigned char *data = wacom->data;
@@ -217,17 +229,13 @@ static int wacom_dtus_irq(struct wacom_wac *wacom)
"%s: received unknown report #%d", __func__, data[0]);
return 0;
} else if (data[0] == WACOM_REPORT_DTUSPAD) {
+ input = wacom->pad_input;
input_report_key(input, BTN_0, (data[1] & 0x01));
input_report_key(input, BTN_1, (data[1] & 0x02));
input_report_key(input, BTN_2, (data[1] & 0x04));
input_report_key(input, BTN_3, (data[1] & 0x08));
input_report_abs(input, ABS_MISC,
data[1] & 0x0f ? PAD_DEVICE_ID : 0);
- /*
- * Serial number is required when expresskeys are
- * reported through pen interface.
- */
- input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
return 1;
} else {
prox = data[1] & 0x80;
@@ -257,7 +265,6 @@ static int wacom_dtus_irq(struct wacom_wac *wacom)
wacom->id[0] = 0;
input_report_key(input, wacom->tool[0], prox);
input_report_abs(input, ABS_MISC, wacom->id[0]);
- input_event(input, EV_MSC, MSC_SERIAL, 1);
return 1;
}
}
@@ -267,11 +274,20 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
struct input_dev *input = wacom->input;
+ struct input_dev *pad_input = wacom->pad_input;
+ int battery_capacity, ps_connected;
int prox;
int rw = 0;
int retval = 0;
- if (data[0] != WACOM_REPORT_PENABLED) {
+ if (features->type == GRAPHIRE_BT) {
+ if (data[0] != WACOM_REPORT_PENABLED_BT) {
+ dev_dbg(input->dev.parent,
+ "%s: received unknown report #%d\n", __func__,
+ data[0]);
+ goto exit;
+ }
+ } else if (data[0] != WACOM_REPORT_PENABLED) {
dev_dbg(input->dev.parent,
"%s: received unknown report #%d\n", __func__, data[0]);
goto exit;
@@ -305,7 +321,12 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
if (wacom->tool[0] != BTN_TOOL_MOUSE) {
- input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x03) << 8));
+ if (features->type == GRAPHIRE_BT)
+ input_report_abs(input, ABS_PRESSURE, data[6] |
+ (((__u16) (data[1] & 0x08)) << 5));
+ else
+ input_report_abs(input, ABS_PRESSURE, data[6] |
+ ((data[7] & 0x03) << 8));
input_report_key(input, BTN_TOUCH, data[1] & 0x01);
input_report_key(input, BTN_STYLUS, data[1] & 0x02);
input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
@@ -316,6 +337,20 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
features->type == WACOM_MO) {
input_report_abs(input, ABS_DISTANCE, data[6] & 0x3f);
rw = (data[7] & 0x04) - (data[7] & 0x03);
+ } else if (features->type == GRAPHIRE_BT) {
+ /* Compute distance between mouse and tablet */
+ rw = 44 - (data[6] >> 2);
+ rw = clamp_val(rw, 0, 31);
+ input_report_abs(input, ABS_DISTANCE, rw);
+ if (((data[1] >> 5) & 3) == 2) {
+ /* Mouse with wheel */
+ input_report_key(input, BTN_MIDDLE,
+ data[1] & 0x04);
+ rw = (data[6] & 0x01) ? -1 :
+ (data[6] & 0x02) ? 1 : 0;
+ } else {
+ rw = 0;
+ }
} else {
input_report_abs(input, ABS_DISTANCE, data[7] & 0x3f);
rw = -(signed char)data[6];
@@ -327,7 +362,6 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
wacom->id[0] = 0;
input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
input_report_key(input, wacom->tool[0], prox);
- input_event(input, EV_MSC, MSC_SERIAL, 1);
input_sync(input); /* sync last event */
}
@@ -337,14 +371,13 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
prox = data[7] & 0xf8;
if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- input_report_key(input, BTN_BACK, (data[7] & 0x40));
- input_report_key(input, BTN_FORWARD, (data[7] & 0x80));
+ input_report_key(pad_input, BTN_BACK, (data[7] & 0x40));
+ input_report_key(pad_input, BTN_FORWARD, (data[7] & 0x80));
rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3);
- input_report_rel(input, REL_WHEEL, rw);
+ input_report_rel(pad_input, REL_WHEEL, rw);
if (!prox)
wacom->id[1] = 0;
- input_report_abs(input, ABS_MISC, wacom->id[1]);
- input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
+ input_report_abs(pad_input, ABS_MISC, wacom->id[1]);
retval = 1;
}
break;
@@ -353,19 +386,43 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
prox = (data[7] & 0xf8) || data[8];
if (prox || wacom->id[1]) {
wacom->id[1] = PAD_DEVICE_ID;
- input_report_key(input, BTN_BACK, (data[7] & 0x08));
- input_report_key(input, BTN_LEFT, (data[7] & 0x20));
- input_report_key(input, BTN_FORWARD, (data[7] & 0x10));
- input_report_key(input, BTN_RIGHT, (data[7] & 0x40));
- input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f));
+ input_report_key(pad_input, BTN_BACK, (data[7] & 0x08));
+ input_report_key(pad_input, BTN_LEFT, (data[7] & 0x20));
+ input_report_key(pad_input, BTN_FORWARD, (data[7] & 0x10));
+ input_report_key(pad_input, BTN_RIGHT, (data[7] & 0x40));
+ input_report_abs(pad_input, ABS_WHEEL, (data[8] & 0x7f));
+ if (!prox)
+ wacom->id[1] = 0;
+ input_report_abs(pad_input, ABS_MISC, wacom->id[1]);
+ retval = 1;
+ }
+ break;
+ case GRAPHIRE_BT:
+ prox = data[7] & 0x03;
+ if (prox || wacom->id[1]) {
+ wacom->id[1] = PAD_DEVICE_ID;
+ input_report_key(pad_input, BTN_0, (data[7] & 0x02));
+ input_report_key(pad_input, BTN_1, (data[7] & 0x01));
if (!prox)
wacom->id[1] = 0;
- input_report_abs(input, ABS_MISC, wacom->id[1]);
- input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
+ input_report_abs(pad_input, ABS_MISC, wacom->id[1]);
retval = 1;
}
break;
}
+
+ /* Store current battery capacity and power supply state */
+ if (features->type == GRAPHIRE_BT) {
+ rw = (data[7] >> 2 & 0x07);
+ battery_capacity = batcap_gr[rw];
+ ps_connected = rw == 7;
+ if ((wacom->battery_capacity != battery_capacity) ||
+ (wacom->ps_connected != ps_connected)) {
+ wacom->battery_capacity = battery_capacity;
+ wacom->ps_connected = ps_connected;
+ wacom_notify_battery(wacom);
+ }
+ }
exit:
return retval;
}
@@ -584,6 +641,7 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
/* pad packets. Works as a second tool and is always in prox */
if (data[0] == WACOM_REPORT_INTUOSPAD || data[0] == WACOM_REPORT_INTUOS5PAD) {
+ input = wacom->pad_input;
if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
input_report_key(input, BTN_0, (data[2] & 0x01));
input_report_key(input, BTN_1, (data[3] & 0x01));
@@ -773,7 +831,6 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_abs(input, ABS_MISC, 0);
}
}
- input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff);
return 1;
}
@@ -901,6 +958,58 @@ static int int_dist(int x1, int y1, int x2, int y2)
return int_sqrt(x*x + y*y);
}
+static void wacom_intuos_bt_process_data(struct wacom_wac *wacom,
+ unsigned char *data)
+{
+ memcpy(wacom->data, data, 10);
+ wacom_intuos_irq(wacom);
+
+ input_sync(wacom->input);
+ if (wacom->pad_input)
+ input_sync(wacom->pad_input);
+}
+
+static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len)
+{
+ unsigned char data[WACOM_PKGLEN_MAX];
+ int i = 1;
+ unsigned power_raw, battery_capacity, bat_charging, ps_connected;
+
+ memcpy(data, wacom->data, len);
+
+ switch (data[0]) {
+ case 0x04:
+ wacom_intuos_bt_process_data(wacom, data + i);
+ i += 10;
+ /* fall through */
+ case 0x03:
+ wacom_intuos_bt_process_data(wacom, data + i);
+ i += 10;
+ wacom_intuos_bt_process_data(wacom, data + i);
+ i += 10;
+ power_raw = data[i];
+ bat_charging = (power_raw & 0x08) ? 1 : 0;
+ ps_connected = (power_raw & 0x10) ? 1 : 0;
+ battery_capacity = batcap_i4[power_raw & 0x07];
+ if ((wacom->battery_capacity != battery_capacity) ||
+ (wacom->bat_charging != bat_charging) ||
+ (wacom->ps_connected != ps_connected)) {
+ wacom->battery_capacity = battery_capacity;
+ wacom->bat_charging = bat_charging;
+ wacom->ps_connected = ps_connected;
+ wacom_notify_battery(wacom);
+ }
+
+ break;
+ default:
+ dev_dbg(wacom->input->dev.parent,
+ "Unknown report: %d,%d size:%zu\n",
+ data[0], data[1], len);
+ return 0;
+ }
+ return 0;
+}
+
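
A Bluetooth Intuos4 report packs two (ID 0x03) or three (ID 0x04) 10-byte pen frames after the report ID, followed by a single power byte, which is why the index above advances in steps of 10 and the 0x04 case falls through into 0x03. A tiny standalone check of where the power byte lands for each layout (frame sizes taken from the code above):

#include <stdio.h>

/* Offset of the power byte for the two Bluetooth report layouts sketched above. */
static int power_offset(unsigned char report_id)
{
	int i = 1;		/* skip the report ID */

	if (report_id == 0x04)
		i += 10;	/* extra pen frame */
	i += 2 * 10;		/* two pen frames common to both layouts */
	return i;
}

int main(void)
{
	printf("0x03 -> %d, 0x04 -> %d\n", power_offset(0x03), power_offset(0x04));
	return 0;
}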
static int wacom_24hdt_irq(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
@@ -1093,7 +1202,7 @@ static int wacom_tpc_pen(struct wacom_wac *wacom)
input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
- input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x03) << 8) | data[6]);
+ input_report_abs(input, ABS_PRESSURE, ((data[7] & 0x07) << 8) | data[6]);
input_report_key(input, BTN_TOUCH, data[1] & 0x05);
input_report_key(input, wacom->tool[0], prox);
return 1;
@@ -1143,6 +1252,7 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
struct input_dev *input = wacom->input;
+ struct input_dev *pad_input = wacom->pad_input;
unsigned char *data = wacom->data;
int i;
@@ -1177,14 +1287,12 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
input_mt_report_pointer_emulation(input, true);
- input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
- input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
- input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
- input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
-
- input_sync(input);
+ input_report_key(pad_input, BTN_LEFT, (data[1] & 0x08) != 0);
+ input_report_key(pad_input, BTN_FORWARD, (data[1] & 0x04) != 0);
+ input_report_key(pad_input, BTN_BACK, (data[1] & 0x02) != 0);
+ input_report_key(pad_input, BTN_RIGHT, (data[1] & 0x01) != 0);
- return 0;
+ return 1;
}
static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
@@ -1232,7 +1340,7 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
{
- struct input_dev *input = wacom->input;
+ struct input_dev *input = wacom->pad_input;
struct wacom_features *features = &wacom->features;
if (features->type == INTUOSHT) {
@@ -1269,9 +1377,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
}
input_mt_report_pointer_emulation(input, true);
- input_sync(input);
-
- return 0;
+ return 1;
}
static int wacom_bpt_pen(struct wacom_wac *wacom)
@@ -1375,7 +1481,7 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
connected = data[1] & 0x01;
if (connected) {
- int pid, battery;
+ int pid, battery, ps_connected;
if ((wacom->shared->type == INTUOSHT) &&
wacom->shared->touch_max) {
@@ -1385,17 +1491,29 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
}
pid = get_unaligned_be16(&data[6]);
- battery = data[5] & 0x3f;
+ battery = (data[5] & 0x3f) * 100 / 31;
+ ps_connected = !!(data[5] & 0x80);
if (wacom->pid != pid) {
wacom->pid = pid;
wacom_schedule_work(wacom);
}
- wacom->battery_capacity = battery;
+
+ if (wacom->shared->type &&
+ (battery != wacom->battery_capacity ||
+ ps_connected != wacom->ps_connected)) {
+ wacom->battery_capacity = battery;
+ wacom->ps_connected = ps_connected;
+ wacom->bat_charging = ps_connected &&
+ wacom->battery_capacity < 100;
+ wacom_notify_battery(wacom);
+ }
} else if (wacom->pid != 0) {
/* disconnected while previously connected */
wacom->pid = 0;
wacom_schedule_work(wacom);
wacom->battery_capacity = 0;
+ wacom->bat_charging = 0;
+ wacom->ps_connected = 0;
}
return 0;
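In the wireless hunk above, the receiver's status byte (data[5]) carries a raw 0-31 battery level in its low six bits and a "plugged in" flag in bit 7; the driver scales the level to a percentage and infers charging from "external power present and capacity below 100%". A standalone sketch of that conversion (struct and function names here are illustrative):

#include <stdint.h>
#include <stdio.h>

struct wl_battery {
	int capacity;     /* 0-100 percent */
	int ps_connected; /* external power present */
	int charging;     /* inferred, not reported by the hardware */
};

static struct wl_battery decode_wireless_status(uint8_t status)
{
	struct wl_battery b;

	b.capacity = (status & 0x3f) * 100 / 31;  /* scale raw 0-31 to percent */
	b.ps_connected = !!(status & 0x80);
	b.charging = b.ps_connected && b.capacity < 100;
	return b;
}

int main(void)
{
	/* 0x9f: plugged in, raw level 31 -> 100%, so not charging */
	struct wl_battery b = decode_wireless_status(0x9f);

	printf("capacity=%d%% ps=%d charging=%d\n",
	       b.capacity, b.ps_connected, b.charging);
	return 0;
}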
@@ -1416,6 +1534,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case WACOM_G4:
case GRAPHIRE:
+ case GRAPHIRE_BT:
case WACOM_MO:
sync = wacom_graphire_irq(wacom_wac);
break;
@@ -1450,6 +1569,10 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
sync = wacom_intuos_irq(wacom_wac);
break;
+ case INTUOS4WL:
+ sync = wacom_intuos_bt_irq(wacom_wac, len);
+ break;
+
case WACOM_24HDT:
sync = wacom_24hdt_irq(wacom_wac);
break;
@@ -1489,8 +1612,11 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
break;
}
- if (sync)
+ if (sync) {
input_sync(wacom_wac->input);
+ if (wacom_wac->pad_input)
+ input_sync(wacom_wac->pad_input);
+ }
}
static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
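The dispatcher hunk above completes a pattern used throughout this patch: report handlers return a "needs sync" flag instead of calling input_sync() themselves, and the single dispatch point flushes both the pen/touch device and, when it exists, the pad device. A toy model of that flow (the types and the sync function are stand-ins, not the kernel input API):

#include <stdbool.h>
#include <stdio.h>

struct fake_input_dev { const char *name; };

static void fake_sync(struct fake_input_dev *dev)
{
	if (dev)
		printf("sync %s\n", dev->name);
}

static bool handle_report(void)
{
	/* a real handler would parse the packet and report events here */
	return true;
}

int main(void)
{
	struct fake_input_dev pen = { "pen" };
	struct fake_input_dev pad = { "pad" };
	struct fake_input_dev *pad_input = &pad; /* would be NULL with no pad */

	if (handle_report()) {
		fake_sync(&pen);
		fake_sync(pad_input); /* no-op when the interface has no pad */
	}
	return 0;
}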
@@ -1565,8 +1691,10 @@ void wacom_setup_device_quirks(struct wacom_features *features)
features->quirks |= WACOM_QUIRK_NO_INPUT;
/* must be monitor interface if no device_type set */
- if (!features->device_type)
+ if (!features->device_type) {
features->quirks |= WACOM_QUIRK_MONITOR;
+ features->quirks |= WACOM_QUIRK_BATTERY;
+ }
}
}
@@ -1615,7 +1743,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
struct wacom_wac *wacom_wac)
{
struct wacom_features *features = &wacom_wac->features;
- int i;
input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
@@ -1630,10 +1757,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
/* fall through */
case WACOM_G4:
- input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
-
- __set_bit(BTN_BACK, input_dev->keybit);
- __set_bit(BTN_FORWARD, input_dev->keybit);
/* fall through */
case GRAPHIRE:
@@ -1652,62 +1775,42 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
- case WACOM_24HD:
- __set_bit(BTN_A, input_dev->keybit);
- __set_bit(BTN_B, input_dev->keybit);
- __set_bit(BTN_C, input_dev->keybit);
- __set_bit(BTN_X, input_dev->keybit);
- __set_bit(BTN_Y, input_dev->keybit);
- __set_bit(BTN_Z, input_dev->keybit);
+ case GRAPHIRE_BT:
+ __clear_bit(ABS_MISC, input_dev->absbit);
+ input_set_abs_params(input_dev, ABS_DISTANCE, 0,
+ features->distance_max,
+ 0, 0);
- for (i = 6; i < 10; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
+ input_set_capability(input_dev, EV_REL, REL_WHEEL);
- __set_bit(KEY_PROG1, input_dev->keybit);
- __set_bit(KEY_PROG2, input_dev->keybit);
- __set_bit(KEY_PROG3, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ __set_bit(BTN_MIDDLE, input_dev->keybit);
+
+ __set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ __set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ __set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+ break;
+ case WACOM_24HD:
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
/* fall through */
case DTK:
- for (i = 0; i < 6; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
wacom_setup_cintiq(wacom_wac);
break;
case WACOM_22HD:
- __set_bit(KEY_PROG1, input_dev->keybit);
- __set_bit(KEY_PROG2, input_dev->keybit);
- __set_bit(KEY_PROG3, input_dev->keybit);
- /* fall through */
-
case WACOM_21UX2:
- __set_bit(BTN_A, input_dev->keybit);
- __set_bit(BTN_B, input_dev->keybit);
- __set_bit(BTN_C, input_dev->keybit);
- __set_bit(BTN_X, input_dev->keybit);
- __set_bit(BTN_Y, input_dev->keybit);
- __set_bit(BTN_Z, input_dev->keybit);
- __set_bit(BTN_BASE, input_dev->keybit);
- __set_bit(BTN_BASE2, input_dev->keybit);
- /* fall through */
-
case WACOM_BEE:
- __set_bit(BTN_8, input_dev->keybit);
- __set_bit(BTN_9, input_dev->keybit);
- /* fall through */
-
case CINTIQ:
- for (i = 0; i < 8; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
- input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
- input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
@@ -1716,9 +1819,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
break;
case WACOM_13HD:
- for (i = 0; i < 9; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
wacom_setup_cintiq(wacom_wac);
@@ -1726,21 +1826,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case INTUOS3:
case INTUOS3L:
- __set_bit(BTN_4, input_dev->keybit);
- __set_bit(BTN_5, input_dev->keybit);
- __set_bit(BTN_6, input_dev->keybit);
- __set_bit(BTN_7, input_dev->keybit);
-
- input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
- /* fall through */
-
case INTUOS3S:
- __set_bit(BTN_0, input_dev->keybit);
- __set_bit(BTN_1, input_dev->keybit);
- __set_bit(BTN_2, input_dev->keybit);
- __set_bit(BTN_3, input_dev->keybit);
-
- input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
/* fall through */
@@ -1754,20 +1840,11 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case INTUOS5L:
case INTUOSPM:
case INTUOSPL:
- if (features->device_type == BTN_TOOL_PEN) {
- __set_bit(BTN_7, input_dev->keybit);
- __set_bit(BTN_8, input_dev->keybit);
- }
- /* fall through */
-
case INTUOS5S:
case INTUOSPS:
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->device_type == BTN_TOOL_PEN) {
- for (i = 0; i < 7; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_DISTANCE, 0,
features->distance_max,
0, 0);
@@ -1787,15 +1864,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
break;
case INTUOS4:
+ case INTUOS4WL:
case INTUOS4L:
- __set_bit(BTN_7, input_dev->keybit);
- __set_bit(BTN_8, input_dev->keybit);
- /* fall through */
-
case INTUOS4S:
- for (i = 0; i < 7; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
-
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
wacom_setup_intuos(wacom_wac);
@@ -1833,11 +1904,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case DTUS:
case PL:
case DTU:
- if (features->type == DTUS) {
- input_set_capability(input_dev, EV_MSC, MSC_SERIAL);
- for (i = 0; i < 4; i++)
- __set_bit(BTN_0 + i, input_dev->keybit);
- }
__set_bit(BTN_TOOL_PEN, input_dev->keybit);
__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
__set_bit(BTN_STYLUS, input_dev->keybit);
@@ -1871,11 +1937,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
if (features->device_type == BTN_TOOL_FINGER) {
- __set_bit(BTN_LEFT, input_dev->keybit);
- __set_bit(BTN_FORWARD, input_dev->keybit);
- __set_bit(BTN_BACK, input_dev->keybit);
- __set_bit(BTN_RIGHT, input_dev->keybit);
-
if (features->touch_max) {
if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
input_set_abs_params(input_dev,
@@ -1905,449 +1966,629 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
break;
case CINTIQ_HYBRID:
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
+ wacom_setup_cintiq(wacom_wac);
+ break;
+ }
+ return 0;
+}
+
+int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
+ struct wacom_wac *wacom_wac)
+{
+ struct wacom_features *features = &wacom_wac->features;
+ int i;
+
+ input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+
+ /* kept so that legacy xf86-input-wacom keeps working with the wheels */
+ __set_bit(ABS_MISC, input_dev->absbit);
+
+ /* kept so that legacy xf86-input-wacom accepts the pad device */
+ input_set_abs_params(input_dev, ABS_X, 0, 1, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 1, 0, 0);
+
+ switch (features->type) {
+ case GRAPHIRE_BT:
+ __set_bit(BTN_0, input_dev->keybit);
__set_bit(BTN_1, input_dev->keybit);
- __set_bit(BTN_2, input_dev->keybit);
- __set_bit(BTN_3, input_dev->keybit);
- __set_bit(BTN_4, input_dev->keybit);
+ break;
+
+ case WACOM_MO:
+ __set_bit(BTN_BACK, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_FORWARD, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
+ break;
+
+ case WACOM_G4:
+ __set_bit(BTN_BACK, input_dev->keybit);
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_FORWARD, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+ input_set_capability(input_dev, EV_REL, REL_WHEEL);
+ break;
+
+ case WACOM_24HD:
+ __set_bit(BTN_A, input_dev->keybit);
+ __set_bit(BTN_B, input_dev->keybit);
+ __set_bit(BTN_C, input_dev->keybit);
+ __set_bit(BTN_X, input_dev->keybit);
+ __set_bit(BTN_Y, input_dev->keybit);
+ __set_bit(BTN_Z, input_dev->keybit);
+ for (i = 0; i < 10; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ __set_bit(KEY_PROG1, input_dev->keybit);
+ __set_bit(KEY_PROG2, input_dev->keybit);
+ __set_bit(KEY_PROG3, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
+ input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
+ break;
+
+ case DTK:
+ for (i = 0; i < 6; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ break;
+
+ case WACOM_22HD:
+ __set_bit(KEY_PROG1, input_dev->keybit);
+ __set_bit(KEY_PROG2, input_dev->keybit);
+ __set_bit(KEY_PROG3, input_dev->keybit);
+ /* fall through */
+
+ case WACOM_21UX2:
+ __set_bit(BTN_A, input_dev->keybit);
+ __set_bit(BTN_B, input_dev->keybit);
+ __set_bit(BTN_C, input_dev->keybit);
+ __set_bit(BTN_X, input_dev->keybit);
+ __set_bit(BTN_Y, input_dev->keybit);
+ __set_bit(BTN_Z, input_dev->keybit);
+ __set_bit(BTN_BASE, input_dev->keybit);
+ __set_bit(BTN_BASE2, input_dev->keybit);
+ /* fall through */
+
+ case WACOM_BEE:
+ __set_bit(BTN_8, input_dev->keybit);
+ __set_bit(BTN_9, input_dev->keybit);
+ /* fall through */
+
+ case CINTIQ:
+ for (i = 0; i < 8; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
+ break;
+
+ case WACOM_13HD:
+ for (i = 0; i < 9; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
+ break;
+
+ case INTUOS3:
+ case INTUOS3L:
+ __set_bit(BTN_4, input_dev->keybit);
__set_bit(BTN_5, input_dev->keybit);
__set_bit(BTN_6, input_dev->keybit);
__set_bit(BTN_7, input_dev->keybit);
- __set_bit(BTN_8, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
+ /* fall through */
+
+ case INTUOS3S:
__set_bit(BTN_0, input_dev->keybit);
+ __set_bit(BTN_1, input_dev->keybit);
+ __set_bit(BTN_2, input_dev->keybit);
+ __set_bit(BTN_3, input_dev->keybit);
- input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
- __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
+ break;
- wacom_setup_cintiq(wacom_wac);
+ case INTUOS5:
+ case INTUOS5L:
+ case INTUOSPM:
+ case INTUOSPL:
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+ /* fall through */
+
+ case INTUOS5S:
+ case INTUOSPS:
+ /* touch interface does not have the pad device */
+ if (features->device_type != BTN_TOOL_PEN)
+ return 1;
+
+ for (i = 0; i < 7; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
break;
+
+ case INTUOS4WL:
+ /*
+ * For Bluetooth devices, the udev rule does not work correctly
+ * for pads unless we add a stylus capability, which forces
+ * ID_INPUT_TABLET to be set.
+ */
+ __set_bit(BTN_STYLUS, input_dev->keybit);
+ /* fall through */
+
+ case INTUOS4:
+ case INTUOS4L:
+ __set_bit(BTN_7, input_dev->keybit);
+ __set_bit(BTN_8, input_dev->keybit);
+ /* fall through */
+
+ case INTUOS4S:
+ for (i = 0; i < 7; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
+ break;
+
+ case CINTIQ_HYBRID:
+ for (i = 0; i < 9; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ break;
+
+ case DTUS:
+ for (i = 0; i < 4; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+ break;
+
+ case INTUOSHT:
+ case BAMBOO_PT:
+ /* pad device is on the touch interface */
+ if (features->device_type != BTN_TOOL_FINGER)
+ return 1;
+
+ __clear_bit(ABS_MISC, input_dev->absbit);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_FORWARD, input_dev->keybit);
+ __set_bit(BTN_BACK, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+
+ break;
+
+ default:
+ /* no pad supported */
+ return 1;
}
return 0;
}
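wacom_setup_pad_input_capabilities() above establishes a convention worth spelling out: every pad node advertises ABS_MISC plus dummy ABS_X/ABS_Y axes with a 0..1 range purely so legacy xf86-input-wacom classifies the node as a tablet pad, and a return value of 1 means "this interface has no pad". A hedged sketch of the same convention for a hypothetical four-button pad (the helper name is made up; the input-core calls are the real kernel API):

#include <linux/input.h>

/* illustrative only: mirrors the dummy-axis convention used above */
static void example_setup_pad(struct input_dev *pad)
{
	int i;

	pad->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);

	/* dummy axes kept so legacy xf86-input-wacom accepts the node */
	__set_bit(ABS_MISC, pad->absbit);
	input_set_abs_params(pad, ABS_X, 0, 1, 0, 0);
	input_set_abs_params(pad, ABS_Y, 0, 1, 0, 0);

	/* four pad buttons for this hypothetical device */
	for (i = 0; i < 4; i++)
		__set_bit(BTN_0 + i, pad->keybit);
}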
static const struct wacom_features wacom_features_0x00 =
- { "Wacom Penpartner", WACOM_PKGLEN_PENPRTN, 5040, 3780, 255,
- 0, PENPARTNER, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES };
+ { "Wacom Penpartner", 5040, 3780, 255, 0,
+ PENPARTNER, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES };
static const struct wacom_features wacom_features_0x10 =
- { "Wacom Graphire", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511,
- 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+ { "Wacom Graphire", 10206, 7422, 511, 63,
+ GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+static const struct wacom_features wacom_features_0x81 =
+ { "Wacom Graphire BT", 16704, 12064, 511, 32,
+ GRAPHIRE_BT, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
static const struct wacom_features wacom_features_0x11 =
- { "Wacom Graphire2 4x5", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511,
- 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+ { "Wacom Graphire2 4x5", 10206, 7422, 511, 63,
+ GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
static const struct wacom_features wacom_features_0x12 =
- { "Wacom Graphire2 5x7", WACOM_PKGLEN_GRAPHIRE, 13918, 10206, 511,
- 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+ { "Wacom Graphire2 5x7", 13918, 10206, 511, 63,
+ GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
static const struct wacom_features wacom_features_0x13 =
- { "Wacom Graphire3", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511,
- 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+ { "Wacom Graphire3", 10208, 7424, 511, 63,
+ GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
static const struct wacom_features wacom_features_0x14 =
- { "Wacom Graphire3 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511,
- 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+ { "Wacom Graphire3 6x8", 16704, 12064, 511, 63,
+ GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
static const struct wacom_features wacom_features_0x15 =
- { "Wacom Graphire4 4x5", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511,
- 63, WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+ { "Wacom Graphire4 4x5", 10208, 7424, 511, 63,
+ WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
static const struct wacom_features wacom_features_0x16 =
- { "Wacom Graphire4 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511,
- 63, WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+ { "Wacom Graphire4 6x8", 16704, 12064, 511, 63,
+ WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
static const struct wacom_features wacom_features_0x17 =
- { "Wacom BambooFun 4x5", WACOM_PKGLEN_BBFUN, 14760, 9225, 511,
- 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom BambooFun 4x5", 14760, 9225, 511, 63,
+ WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x18 =
- { "Wacom BambooFun 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 511,
- 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom BambooFun 6x8", 21648, 13530, 511, 63,
+ WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x19 =
- { "Wacom Bamboo1 Medium", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511,
- 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
+ { "Wacom Bamboo1 Medium", 16704, 12064, 511, 63,
+ GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES };
static const struct wacom_features wacom_features_0x60 =
- { "Wacom Volito", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511,
- 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
+ { "Wacom Volito", 5104, 3712, 511, 63,
+ GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
static const struct wacom_features wacom_features_0x61 =
- { "Wacom PenStation2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 255,
- 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
+ { "Wacom PenStation2", 3250, 2320, 255, 63,
+ GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
static const struct wacom_features wacom_features_0x62 =
- { "Wacom Volito2 4x5", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511,
- 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
+ { "Wacom Volito2 4x5", 5104, 3712, 511, 63,
+ GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
static const struct wacom_features wacom_features_0x63 =
- { "Wacom Volito2 2x3", WACOM_PKGLEN_GRAPHIRE, 3248, 2320, 511,
- 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
+ { "Wacom Volito2 2x3", 3248, 2320, 511, 63,
+ GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
static const struct wacom_features wacom_features_0x64 =
- { "Wacom PenPartner2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 511,
- 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
+ { "Wacom PenPartner2", 3250, 2320, 511, 63,
+ GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES };
static const struct wacom_features wacom_features_0x65 =
- { "Wacom Bamboo", WACOM_PKGLEN_BBFUN, 14760, 9225, 511,
- 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo", 14760, 9225, 511, 63,
+ WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x69 =
- { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511,
- 63, GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES };
+ { "Wacom Bamboo1", 5104, 3712, 511, 63,
+ GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES };
static const struct wacom_features wacom_features_0x6A =
- { "Wacom Bamboo1 4x6", WACOM_PKGLEN_GRAPHIRE, 14760, 9225, 1023,
- 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo1 4x6", 14760, 9225, 1023, 63,
+ GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x6B =
- { "Wacom Bamboo1 5x8", WACOM_PKGLEN_GRAPHIRE, 21648, 13530, 1023,
- 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo1 5x8", 21648, 13530, 1023, 63,
+ GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x20 =
- { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos 4x5", 12700, 10600, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x21 =
- { "Wacom Intuos 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos 6x8", 20320, 16240, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x22 =
- { "Wacom Intuos 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos 9x12", 30480, 24060, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x23 =
- { "Wacom Intuos 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos 12x12", 30480, 31680, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x24 =
- { "Wacom Intuos 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos 12x18", 45720, 31680, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x30 =
- { "Wacom PL400", WACOM_PKGLEN_GRAPHIRE, 5408, 4056, 255,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom PL400", 5408, 4056, 255, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x31 =
- { "Wacom PL500", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 255,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom PL500", 6144, 4608, 255, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x32 =
- { "Wacom PL600", WACOM_PKGLEN_GRAPHIRE, 6126, 4604, 255,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom PL600", 6126, 4604, 255, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x33 =
- { "Wacom PL600SX", WACOM_PKGLEN_GRAPHIRE, 6260, 5016, 255,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom PL600SX", 6260, 5016, 255, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x34 =
- { "Wacom PL550", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 511,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom PL550", 6144, 4608, 511, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x35 =
- { "Wacom PL800", WACOM_PKGLEN_GRAPHIRE, 7220, 5780, 511,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom PL800", 7220, 5780, 511, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x37 =
- { "Wacom PL700", WACOM_PKGLEN_GRAPHIRE, 6758, 5406, 511,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom PL700", 6758, 5406, 511, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x38 =
- { "Wacom PL510", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom PL510", 6282, 4762, 511, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x39 =
- { "Wacom DTU710", WACOM_PKGLEN_GRAPHIRE, 34080, 27660, 511,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom DTU710", 34080, 27660, 511, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0xC4 =
- { "Wacom DTF521", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom DTF521", 6282, 4762, 511, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0xC0 =
- { "Wacom DTF720", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom DTF720", 6858, 5506, 511, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0xC2 =
- { "Wacom DTF720a", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511,
- 0, PL, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom DTF720a", 6858, 5506, 511, 0,
+ PL, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x03 =
- { "Wacom Cintiq Partner", WACOM_PKGLEN_GRAPHIRE, 20480, 15360, 511,
- 0, PTU, WACOM_PL_RES, WACOM_PL_RES };
+ { "Wacom Cintiq Partner", 20480, 15360, 511, 0,
+ PTU, WACOM_PL_RES, WACOM_PL_RES };
static const struct wacom_features wacom_features_0x41 =
- { "Wacom Intuos2 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos2 4x5", 12700, 10600, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x42 =
- { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos2 6x8", 20320, 16240, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x43 =
- { "Wacom Intuos2 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos2 9x12", 30480, 24060, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x44 =
- { "Wacom Intuos2 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos2 12x12", 30480, 31680, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x45 =
- { "Wacom Intuos2 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos2 12x18", 45720, 31680, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xB0 =
- { "Wacom Intuos3 4x5", WACOM_PKGLEN_INTUOS, 25400, 20320, 1023,
- 63, INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos3 4x5", 25400, 20320, 1023, 63,
+ INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xB1 =
- { "Wacom Intuos3 6x8", WACOM_PKGLEN_INTUOS, 40640, 30480, 1023,
- 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos3 6x8", 40640, 30480, 1023, 63,
+ INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xB2 =
- { "Wacom Intuos3 9x12", WACOM_PKGLEN_INTUOS, 60960, 45720, 1023,
- 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos3 9x12", 60960, 45720, 1023, 63,
+ INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xB3 =
- { "Wacom Intuos3 12x12", WACOM_PKGLEN_INTUOS, 60960, 60960, 1023,
- 63, INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos3 12x12", 60960, 60960, 1023, 63,
+ INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xB4 =
- { "Wacom Intuos3 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 1023,
- 63, INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos3 12x19", 97536, 60960, 1023, 63,
+ INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xB5 =
- { "Wacom Intuos3 6x11", WACOM_PKGLEN_INTUOS, 54204, 31750, 1023,
- 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos3 6x11", 54204, 31750, 1023, 63,
+ INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xB7 =
- { "Wacom Intuos3 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 1023,
- 63, INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos3 4x6", 31496, 19685, 1023, 63,
+ INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xB8 =
- { "Wacom Intuos4 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
- 63, INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos4 4x6", 31496, 19685, 2047, 63,
+ INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xB9 =
- { "Wacom Intuos4 6x9", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
- 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos4 6x9", 44704, 27940, 2047, 63,
+ INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xBA =
- { "Wacom Intuos4 8x13", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047,
- 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos4 8x13", 65024, 40640, 2047, 63,
+ INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xBB =
- { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047,
- 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos4 12x19", 97536, 60960, 2047, 63,
+ INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xBC =
- { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047,
- 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos4 WL", 40640, 25400, 2047, 63,
+ INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0xBD =
+ { "Wacom Intuos4 WL", 40640, 25400, 2047, 63,
+ INTUOS4WL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x26 =
- { "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
- 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
- .touch_max = 16 };
+ { "Wacom Intuos5 touch S", 31496, 19685, 2047, 63,
+ INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 };
static const struct wacom_features wacom_features_0x27 =
- { "Wacom Intuos5 touch M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
- 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
- .touch_max = 16 };
+ { "Wacom Intuos5 touch M", 44704, 27940, 2047, 63,
+ INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 };
static const struct wacom_features wacom_features_0x28 =
- { "Wacom Intuos5 touch L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047,
- 63, INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
- .touch_max = 16 };
+ { "Wacom Intuos5 touch L", 65024, 40640, 2047, 63,
+ INTUOS5L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16 };
static const struct wacom_features wacom_features_0x29 =
- { "Wacom Intuos5 S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
- 63, INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos5 S", 31496, 19685, 2047, 63,
+ INTUOS5S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x2A =
- { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
- 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Intuos5 M", 44704, 27940, 2047, 63,
+ INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x314 =
- { "Wacom Intuos Pro S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
- 63, INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
- .touch_max = 16 };
+ { "Wacom Intuos Pro S", 31496, 19685, 2047, 63,
+ INTUOSPS, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x315 =
- { "Wacom Intuos Pro M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
- 63, INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
- .touch_max = 16 };
+ { "Wacom Intuos Pro M", 44704, 27940, 2047, 63,
+ INTUOSPM, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x317 =
- { "Wacom Intuos Pro L", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047,
- 63, INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
- .touch_max = 16 };
+ { "Wacom Intuos Pro L", 65024, 40640, 2047, 63,
+ INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xF4 =
- { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104280, 65400, 2047,
- 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ { "Wacom Cintiq 24HD", 104280, 65400, 2047, 63,
+ WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0xF8 =
- { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104280, 65400, 2047, /* Pen */
- 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ { "Wacom Cintiq 24HD touch", 104280, 65400, 2047, 63, /* Pen */
+ WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
static const struct wacom_features wacom_features_0xF6 =
{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
- .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10 };
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x3F =
- { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
- 63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Cintiq 21UX", 87200, 65600, 1023, 63,
+ CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xC5 =
- { "Wacom Cintiq 20WSX", WACOM_PKGLEN_INTUOS, 86680, 54180, 1023,
- 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Cintiq 20WSX", 86680, 54180, 1023, 63,
+ WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xC6 =
- { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023,
- 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+ { "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
+ WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x304 =
- { "Wacom Cintiq 13HD", WACOM_PKGLEN_INTUOS, 59352, 33648, 1023,
- 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ { "Wacom Cintiq 13HD", 59352, 33648, 1023, 63,
+ WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0xC7 =
- { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511,
- 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom DTU1931", 37832, 30305, 511, 0,
+ PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xCE =
- { "Wacom DTU2231", WACOM_PKGLEN_GRAPHIRE, 47864, 27011, 511,
- 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom DTU2231", 47864, 27011, 511, 0,
+ DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBMOUSE };
static const struct wacom_features wacom_features_0xF0 =
- { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511,
- 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom DTU1631", 34623, 19553, 511, 0,
+ DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xFB =
- { "Wacom DTU1031", WACOM_PKGLEN_DTUS, 22096, 13960, 511,
- 0, DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom DTU1031", 22096, 13960, 511, 0,
+ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x57 =
- { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047,
- 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ { "Wacom DTK2241", 95640, 54060, 2047, 63,
+ DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0x59 = /* Pen */
- { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047,
- 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ { "Wacom DTH2242", 95640, 54060, 2047, 63,
+ DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
static const struct wacom_features wacom_features_0x5D = /* Touch */
{ "Wacom DTH2242", .type = WACOM_24HDT,
- .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10 };
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0xCC =
- { "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87000, 65400, 2047,
- 63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ { "Wacom Cintiq 21UX2", 87000, 65400, 2047, 63,
+ WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0xFA =
- { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047,
- 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+ { "Wacom Cintiq 22HD", 95640, 54060, 2047, 63,
+ WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
static const struct wacom_features wacom_features_0x5B =
- { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95640, 54060, 2047,
- 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ { "Wacom Cintiq 22HDT", 95640, 54060, 2047, 63,
+ WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
static const struct wacom_features wacom_features_0x5E =
{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
- .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 };
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x90 =
- { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
- 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 90", 26202, 16325, 255, 0,
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x93 =
- { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
- 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 93", 26202, 16325, 255, 0,
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x97 =
- { "Wacom ISDv4 97", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 511,
- 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 97", 26202, 16325, 511, 0,
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x9A =
- { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
- 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 9A", 26202, 16325, 255, 0,
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x9F =
- { "Wacom ISDv4 9F", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
- 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 9F", 26202, 16325, 255, 0,
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xE2 =
- { "Wacom ISDv4 E2", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255,
- 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom ISDv4 E2", 26202, 16325, 255, 0,
+ TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xE3 =
- { "Wacom ISDv4 E3", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255,
- 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom ISDv4 E3", 26202, 16325, 255, 0,
+ TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xE5 =
- { "Wacom ISDv4 E5", WACOM_PKGLEN_MTOUCH, 26202, 16325, 255,
- 0, MTSCREEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 E5", 26202, 16325, 255, 0,
+ MTSCREEN, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xE6 =
- { "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
- 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom ISDv4 E6", 27760, 15694, 255, 0,
+ TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xEC =
- { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
- 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 EC", 25710, 14500, 255, 0,
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xED =
- { "Wacom ISDv4 ED", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
- 0, TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 ED", 26202, 16325, 255, 0,
+ TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xEF =
- { "Wacom ISDv4 EF", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
- 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 EF", 26202, 16325, 255, 0,
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x100 =
- { "Wacom ISDv4 100", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
- 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 100", 26202, 16325, 255, 0,
+ MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x101 =
- { "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
- 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 101", 26202, 16325, 255, 0,
+ MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x10D =
- { "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
- 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 10D", 26202, 16325, 255, 0,
+ MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x10E =
- { "Wacom ISDv4 10E", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
- 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 10E", 27760, 15694, 255, 0,
+ MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x10F =
- { "Wacom ISDv4 10F", WACOM_PKGLEN_MTTPC, 27760, 15694, 255,
- 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 10F", 27760, 15694, 255, 0,
+ MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x116 =
- { "Wacom ISDv4 116", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
- 0, TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 116", 26202, 16325, 255, 0,
+ TABLETPCE, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x12C =
+ { "Wacom ISDv4 12C", 27848, 15752, 2047, 0,
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x4001 =
- { "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
- 0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 4001", 26202, 16325, 255, 0,
+ MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x4004 =
- { "Wacom ISDv4 4004", WACOM_PKGLEN_MTTPC, 11060, 6220, 255,
- 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 4004", 11060, 6220, 255, 0,
+ MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x5000 =
- { "Wacom ISDv4 5000", WACOM_PKGLEN_MTTPC, 27848, 15752, 1023,
- 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 5000", 27848, 15752, 1023, 0,
+ MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x5002 =
- { "Wacom ISDv4 5002", WACOM_PKGLEN_MTTPC, 29576, 16724, 1023,
- 0, MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom ISDv4 5002", 29576, 16724, 1023, 0,
+ MTTPC_B, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
- { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
- 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos2 6x8", 20320, 16240, 1023, 31,
+ INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x84 =
- { "Wacom Wireless Receiver", WACOM_PKGLEN_WIRELESS, 0, 0, 0,
- 0, WIRELESS, 0, 0, .touch_max = 16 };
+ { "Wacom Wireless Receiver", 0, 0, 0, 0,
+ WIRELESS, 0, 0, .touch_max = 16 };
static const struct wacom_features wacom_features_0xD0 =
- { "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom Bamboo 2FG", 14720, 9200, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xD1 =
- { "Wacom Bamboo 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom Bamboo 2FG 4x5", 14720, 9200, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xD2 =
- { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom Bamboo Craft", 14720, 9200, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xD3 =
- { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom Bamboo 2FG 6x8", 21648, 13700, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xD4 =
- { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo Pen", 14720, 9200, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD5 =
- { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo Pen 6x8", 21648, 13700, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xD6 =
- { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom BambooPT 2FG 4x5", 14720, 9200, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xD7 =
- { "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom BambooPT 2FG Small", 14720, 9200, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xD8 =
- { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom Bamboo Comic 2FG", 21648, 13700, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xDA =
- { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom Bamboo 2FG 4x5 SE", 14720, 9200, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xDB =
- { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 2 };
+ { "Wacom Bamboo 2FG 6x8 SE", 21648, 13700, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 2 };
static const struct wacom_features wacom_features_0xDD =
- { "Wacom Bamboo Connect", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo Connect", 14720, 9200, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xDE =
- { "Wacom Bamboo 16FG 4x5", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 16 };
+ { "Wacom Bamboo 16FG 4x5", 14720, 9200, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16 };
static const struct wacom_features wacom_features_0xDF =
- { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 16 };
+ { "Wacom Bamboo 16FG 6x8", 21648, 13700, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16 };
static const struct wacom_features wacom_features_0x300 =
- { "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo One S", 14720, 9225, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x301 =
- { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
- 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Bamboo One M", 21648, 13530, 1023, 31,
+ BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x302 =
- { "Wacom Intuos PT S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023,
- 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 16 };
+ { "Wacom Intuos PT S", 15200, 9500, 1023, 31,
+ INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x303 =
- { "Wacom Intuos PT M", WACOM_PKGLEN_BBPEN, 21600, 13500, 1023,
- 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
- .touch_max = 16 };
+ { "Wacom Intuos PT M", 21600, 13500, 1023, 31,
+ INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x30E =
- { "Wacom Intuos S", WACOM_PKGLEN_BBPEN, 15200, 9500, 1023,
- 31, INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+ { "Wacom Intuos S", 15200, 9500, 1023, 31,
+ INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
static const struct wacom_features wacom_features_0x6004 =
- { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
- 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
-static const struct wacom_features wacom_features_0x0307 =
- { "Wacom ISDv5 307", WACOM_PKGLEN_INTUOS, 59352, 33648, 2047,
- 63, CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+ { "ISD-V4", 12800, 8000, 255, 0,
+ TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x307 =
+ { "Wacom ISDv5 307", 59352, 33648, 2047, 63,
+ CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
.oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
-static const struct wacom_features wacom_features_0x0309 =
+static const struct wacom_features wacom_features_0x309 =
{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
- .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10 };
+ .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
+ .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
-#define USB_DEVICE_WACOM(prod) \
- USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \
- .driver_info = (kernel_ulong_t)&wacom_features_##prod
+#define USB_DEVICE_WACOM(prod) \
+ HID_DEVICE(BUS_USB, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ .driver_data = (kernel_ulong_t)&wacom_features_##prod
-#define USB_DEVICE_DETAILED(prod, class, sub, proto) \
- USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_WACOM, prod, class, \
- sub, proto), \
- .driver_info = (kernel_ulong_t)&wacom_features_##prod
+#define BT_DEVICE_WACOM(prod) \
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\
+ .driver_data = (kernel_ulong_t)&wacom_features_##prod
#define USB_DEVICE_LENOVO(prod) \
- USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \
- .driver_info = (kernel_ulong_t)&wacom_features_##prod
+ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \
+ .driver_data = (kernel_ulong_t)&wacom_features_##prod
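With the macros above, the device table is now keyed by HID bus/group/vendor/product instead of raw USB IDs, and the per-device feature record travels in hid_device_id::driver_data rather than usb_device_id::driver_info. A sketch of how a probe callback might recover it (the probe body and driver name are illustrative; a real probe also parses the HID descriptor and starts the transport):

#include <linux/hid.h>

static int example_probe(struct hid_device *hdev,
			 const struct hid_device_id *id)
{
	/* feature record stashed by the USB_DEVICE_WACOM()/BT_DEVICE_WACOM() macros */
	const struct wacom_features *features =
		(const struct wacom_features *)id->driver_data;

	hid_info(hdev, "found %s\n", features->name);
	return 0;
}

static struct hid_driver example_driver = {
	.name     = "wacom-sketch",
	.id_table = wacom_ids,
	.probe    = example_probe,
};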
-const struct usb_device_id wacom_ids[] = {
+const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x00) },
+ { USB_DEVICE_WACOM(0x03) },
{ USB_DEVICE_WACOM(0x10) },
{ USB_DEVICE_WACOM(0x11) },
{ USB_DEVICE_WACOM(0x12) },
@@ -2358,20 +2599,16 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x17) },
{ USB_DEVICE_WACOM(0x18) },
{ USB_DEVICE_WACOM(0x19) },
- { USB_DEVICE_WACOM(0x60) },
- { USB_DEVICE_WACOM(0x61) },
- { USB_DEVICE_WACOM(0x62) },
- { USB_DEVICE_WACOM(0x63) },
- { USB_DEVICE_WACOM(0x64) },
- { USB_DEVICE_WACOM(0x65) },
- { USB_DEVICE_WACOM(0x69) },
- { USB_DEVICE_WACOM(0x6A) },
- { USB_DEVICE_WACOM(0x6B) },
{ USB_DEVICE_WACOM(0x20) },
{ USB_DEVICE_WACOM(0x21) },
{ USB_DEVICE_WACOM(0x22) },
{ USB_DEVICE_WACOM(0x23) },
{ USB_DEVICE_WACOM(0x24) },
+ { USB_DEVICE_WACOM(0x26) },
+ { USB_DEVICE_WACOM(0x27) },
+ { USB_DEVICE_WACOM(0x28) },
+ { USB_DEVICE_WACOM(0x29) },
+ { USB_DEVICE_WACOM(0x2A) },
{ USB_DEVICE_WACOM(0x30) },
{ USB_DEVICE_WACOM(0x31) },
{ USB_DEVICE_WACOM(0x32) },
@@ -2381,20 +2618,34 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x37) },
{ USB_DEVICE_WACOM(0x38) },
{ USB_DEVICE_WACOM(0x39) },
- { USB_DEVICE_WACOM(0xC4) },
- { USB_DEVICE_WACOM(0xC0) },
- { USB_DEVICE_WACOM(0xC2) },
- { USB_DEVICE_WACOM(0x03) },
+ { USB_DEVICE_WACOM(0x3F) },
{ USB_DEVICE_WACOM(0x41) },
{ USB_DEVICE_WACOM(0x42) },
{ USB_DEVICE_WACOM(0x43) },
{ USB_DEVICE_WACOM(0x44) },
{ USB_DEVICE_WACOM(0x45) },
+ { USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0x57) },
{ USB_DEVICE_WACOM(0x59) },
- { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
{ USB_DEVICE_WACOM(0x5B) },
- { USB_DEVICE_DETAILED(0x5E, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0x5D) },
+ { USB_DEVICE_WACOM(0x5E) },
+ { USB_DEVICE_WACOM(0x60) },
+ { USB_DEVICE_WACOM(0x61) },
+ { USB_DEVICE_WACOM(0x62) },
+ { USB_DEVICE_WACOM(0x63) },
+ { USB_DEVICE_WACOM(0x64) },
+ { USB_DEVICE_WACOM(0x65) },
+ { USB_DEVICE_WACOM(0x69) },
+ { USB_DEVICE_WACOM(0x6A) },
+ { USB_DEVICE_WACOM(0x6B) },
+ { BT_DEVICE_WACOM(0x81) },
+ { USB_DEVICE_WACOM(0x84) },
+ { USB_DEVICE_WACOM(0x90) },
+ { USB_DEVICE_WACOM(0x93) },
+ { USB_DEVICE_WACOM(0x97) },
+ { USB_DEVICE_WACOM(0x9A) },
+ { USB_DEVICE_WACOM(0x9F) },
{ USB_DEVICE_WACOM(0xB0) },
{ USB_DEVICE_WACOM(0xB1) },
{ USB_DEVICE_WACOM(0xB2) },
@@ -2407,23 +2658,15 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xBA) },
{ USB_DEVICE_WACOM(0xBB) },
{ USB_DEVICE_WACOM(0xBC) },
- { USB_DEVICE_WACOM(0x26) },
- { USB_DEVICE_WACOM(0x27) },
- { USB_DEVICE_WACOM(0x28) },
- { USB_DEVICE_WACOM(0x29) },
- { USB_DEVICE_WACOM(0x2A) },
- { USB_DEVICE_WACOM(0x3F) },
+ { BT_DEVICE_WACOM(0xBD) },
+ { USB_DEVICE_WACOM(0xC0) },
+ { USB_DEVICE_WACOM(0xC2) },
+ { USB_DEVICE_WACOM(0xC4) },
{ USB_DEVICE_WACOM(0xC5) },
{ USB_DEVICE_WACOM(0xC6) },
{ USB_DEVICE_WACOM(0xC7) },
- /*
- * DTU-2231 has two interfaces on the same configuration,
- * only one is used.
- */
- { USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID,
- USB_INTERFACE_SUBCLASS_BOOT,
- USB_INTERFACE_PROTOCOL_MOUSE) },
- { USB_DEVICE_WACOM(0x84) },
+ { USB_DEVICE_WACOM(0xCC) },
+ { USB_DEVICE_WACOM(0xCE) },
{ USB_DEVICE_WACOM(0xD0) },
{ USB_DEVICE_WACOM(0xD1) },
{ USB_DEVICE_WACOM(0xD2) },
@@ -2438,13 +2681,6 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xDD) },
{ USB_DEVICE_WACOM(0xDE) },
{ USB_DEVICE_WACOM(0xDF) },
- { USB_DEVICE_WACOM(0xF0) },
- { USB_DEVICE_WACOM(0xCC) },
- { USB_DEVICE_WACOM(0x90) },
- { USB_DEVICE_WACOM(0x93) },
- { USB_DEVICE_WACOM(0x97) },
- { USB_DEVICE_WACOM(0x9A) },
- { USB_DEVICE_WACOM(0x9F) },
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
{ USB_DEVICE_WACOM(0xE5) },
@@ -2452,34 +2688,34 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0xED) },
{ USB_DEVICE_WACOM(0xEF) },
+ { USB_DEVICE_WACOM(0xF0) },
+ { USB_DEVICE_WACOM(0xF4) },
+ { USB_DEVICE_WACOM(0xF6) },
+ { USB_DEVICE_WACOM(0xF8) },
+ { USB_DEVICE_WACOM(0xFA) },
+ { USB_DEVICE_WACOM(0xFB) },
{ USB_DEVICE_WACOM(0x100) },
{ USB_DEVICE_WACOM(0x101) },
{ USB_DEVICE_WACOM(0x10D) },
{ USB_DEVICE_WACOM(0x10E) },
{ USB_DEVICE_WACOM(0x10F) },
{ USB_DEVICE_WACOM(0x116) },
+ { USB_DEVICE_WACOM(0x12C) },
{ USB_DEVICE_WACOM(0x300) },
{ USB_DEVICE_WACOM(0x301) },
- { USB_DEVICE_DETAILED(0x302, USB_CLASS_HID, 0, 0) },
- { USB_DEVICE_DETAILED(0x303, USB_CLASS_HID, 0, 0) },
- { USB_DEVICE_DETAILED(0x30E, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0x302) },
+ { USB_DEVICE_WACOM(0x303) },
{ USB_DEVICE_WACOM(0x304) },
- { USB_DEVICE_DETAILED(0x314, USB_CLASS_HID, 0, 0) },
- { USB_DEVICE_DETAILED(0x315, USB_CLASS_HID, 0, 0) },
- { USB_DEVICE_DETAILED(0x317, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0x307) },
+ { USB_DEVICE_WACOM(0x309) },
+ { USB_DEVICE_WACOM(0x30E) },
+ { USB_DEVICE_WACOM(0x314) },
+ { USB_DEVICE_WACOM(0x315) },
+ { USB_DEVICE_WACOM(0x317) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
{ USB_DEVICE_WACOM(0x5002) },
- { USB_DEVICE_WACOM(0x47) },
- { USB_DEVICE_WACOM(0xF4) },
- { USB_DEVICE_WACOM(0xF8) },
- { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
- { USB_DEVICE_WACOM(0xFA) },
- { USB_DEVICE_WACOM(0xFB) },
- { USB_DEVICE_WACOM(0x0307) },
- { USB_DEVICE_DETAILED(0x0309, USB_CLASS_HID, 0, 0) },
- { USB_DEVICE_LENOVO(0x6004) },
{ }
};
-MODULE_DEVICE_TABLE(usb, wacom_ids);
+MODULE_DEVICE_TABLE(hid, wacom_ids);
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/hid/wacom_wac.h
index b2c9a9c1b551..339ab5d81a2d 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -46,6 +46,7 @@
/* wacom data packet report IDs */
#define WACOM_REPORT_PENABLED 2
+#define WACOM_REPORT_PENABLED_BT 3
#define WACOM_REPORT_INTUOSREAD 5
#define WACOM_REPORT_INTUOSWRITE 6
#define WACOM_REPORT_INTUOSPAD 12
@@ -68,10 +69,12 @@
#define WACOM_QUIRK_BBTOUCH_LOWRES 0x0002
#define WACOM_QUIRK_NO_INPUT 0x0004
#define WACOM_QUIRK_MONITOR 0x0008
+#define WACOM_QUIRK_BATTERY 0x0010
enum {
PENPARTNER = 0,
GRAPHIRE,
+ GRAPHIRE_BT,
WACOM_G4,
PTU,
PL,
@@ -83,6 +86,7 @@ enum {
INTUOS3L,
INTUOS4S,
INTUOS4,
+ INTUOS4WL,
INTUOS4L,
INTUOS5S,
INTUOS5,
@@ -114,7 +118,6 @@ enum {
struct wacom_features {
const char *name;
- int pktlen;
int x_max;
int y_max;
int pressure_max;
@@ -127,8 +130,8 @@ struct wacom_features {
int device_type;
int x_phy;
int y_phy;
- unsigned char unit;
- unsigned char unitExpo;
+ unsigned unit;
+ int unitExpo;
int x_fuzz;
int y_fuzz;
int pressure_fuzz;
@@ -137,6 +140,9 @@ struct wacom_features {
unsigned touch_max;
int oVid;
int oPid;
+ int pktlen;
+ bool check_for_hid_type;
+ int hid_type;
};
struct wacom_shared {
@@ -150,16 +156,24 @@ struct wacom_shared {
struct wacom_wac {
char name[WACOM_NAME_MAX];
- unsigned char *data;
+ char pad_name[WACOM_NAME_MAX];
+ char bat_name[WACOM_NAME_MAX];
+ char ac_name[WACOM_NAME_MAX];
+ unsigned char data[WACOM_PKGLEN_MAX];
int tool[2];
int id[2];
__u32 serial[2];
struct wacom_features features;
struct wacom_shared *shared;
struct input_dev *input;
+ struct input_dev *pad_input;
int pid;
int battery_capacity;
int num_contacts_left;
+ int bat_charging;
+ int ps_connected;
+ u8 bt_features;
+ u8 bt_high_speed;
};
#endif
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 737fa2e0e782..e5c7a969f28b 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -901,7 +901,7 @@ out:
ssip_free_data(msg);
}
-void ssip_port_event(struct hsi_client *cl, unsigned long event)
+static void ssip_port_event(struct hsi_client *cl, unsigned long event)
{
switch (event) {
case HSI_EVENT_START_RX:
diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c
index 0fc7a7fd0140..bf0eace4cb67 100644
--- a/drivers/hsi/controllers/omap_ssi.c
+++ b/drivers/hsi/controllers/omap_ssi.c
@@ -148,14 +148,14 @@ static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi)
/* SSI controller */
omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL);
- if (IS_ERR(omap_ssi->dir))
- return PTR_ERR(omap_ssi->dir);
+ if (!omap_ssi->dir)
+ return -ENOMEM;
debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi,
&ssi_regs_fops);
/* SSI GDD (DMA) */
dir = debugfs_create_dir("gdd", omap_ssi->dir);
- if (IS_ERR(dir))
+ if (!dir)
goto rback;
debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops);
@@ -163,7 +163,7 @@ static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi)
rback:
debugfs_remove_recursive(omap_ssi->dir);
- return PTR_ERR(dir);
+ return -ENOMEM;
}
static void ssi_debug_remove_ctrl(struct hsi_controller *ssi)
@@ -353,12 +353,12 @@ static int __init ssi_add_controller(struct hsi_controller *ssi,
err = ssi_get_iomem(pd, "gdd", &omap_ssi->gdd, NULL);
if (err < 0)
goto out_err;
- omap_ssi->gdd_irq = platform_get_irq_byname(pd, "gdd_mpu");
- if (omap_ssi->gdd_irq < 0) {
+ err = platform_get_irq_byname(pd, "gdd_mpu");
+ if (err < 0) {
dev_err(&pd->dev, "GDD IRQ resource missing\n");
- err = omap_ssi->gdd_irq;
goto out_err;
}
+ omap_ssi->gdd_irq = err;
tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet,
(unsigned long)ssi);
err = devm_request_irq(&ssi->device, omap_ssi->gdd_irq, ssi_gdd_isr,
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index 29aea0b93360..4c0b5820581e 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -177,13 +177,13 @@ static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
struct hsi_port *port = to_hsi_port(omap_port->dev);
dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
- if (IS_ERR(dir))
- return PTR_ERR(dir);
+ if (!dir)
+ return -ENOMEM;
omap_port->dir = dir;
debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
dir = debugfs_create_dir("sst", dir);
- if (IS_ERR(dir))
- return PTR_ERR(dir);
+ if (!dir)
+ return -ENOMEM;
debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
&ssi_sst_div_fops);
@@ -1013,11 +1013,12 @@ static int __init ssi_port_irq(struct hsi_port *port,
struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
int err;
- omap_port->irq = platform_get_irq(pd, 0);
- if (omap_port->irq < 0) {
+ err = platform_get_irq(pd, 0);
+ if (err < 0) {
dev_err(&port->device, "Port IRQ resource missing\n");
- return omap_port->irq;
+ return err;
}
+ omap_port->irq = err;
tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
(unsigned long)port);
err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index ae208f612198..cccef87963e0 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -688,7 +688,7 @@ static int atk_debugfs_gitm_get(void *p, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(atk_debugfs_gitm,
atk_debugfs_gitm_get,
NULL,
- "0x%08llx\n")
+ "0x%08llx\n");
static int atk_acpi_print(char *buf, size_t sz, union acpi_object *obj)
{
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 7b7ea320a258..3e3b680dc007 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -2,7 +2,9 @@
# I2C subsystem configuration
#
-menuconfig I2C
+menu "I2C support"
+
+config I2C
tristate "I2C support"
select RT_MUTEXES
---help---
@@ -21,6 +23,18 @@ menuconfig I2C
This I2C support can also be built as a module. If so, the module
will be called i2c-core.
+config I2C_ACPI
+ bool "I2C ACPI support"
+ select I2C
+ depends on ACPI
+ default y
+ help
+ Say Y here if you want to enable ACPI I2C support. This includes support
+ for automatic enumeration of I2C slave devices and support for ACPI I2C
+ Operation Regions. Operation Regions allow firmware (BIOS) code to
+ access I2C slave devices, such as smart batteries, through an I2C host
+ controller driver.
+
if I2C
config I2C_BOARDINFO
@@ -124,3 +138,5 @@ config I2C_DEBUG_BUS
on.
endif # I2C
+
+endmenu
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index 1722f50f2473..a1f590cbb435 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -2,8 +2,11 @@
# Makefile for the i2c core.
#
+i2ccore-y := i2c-core.o
+i2ccore-$(CONFIG_I2C_ACPI) += i2c-acpi.o
+
obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
-obj-$(CONFIG_I2C) += i2c-core.o
+obj-$(CONFIG_I2C) += i2ccore.o
obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-$(CONFIG_I2C_MUX) += i2c-mux.o
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9f7d5859cf65..2ac87fa3058d 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -109,6 +109,7 @@ config I2C_I801
Avoton (SOC)
Wellsburg (PCH)
Coleto Creek (PCH)
+ Wildcat Point (PCH)
Wildcat Point-LP (PCH)
BayTrail (SOC)
@@ -465,9 +466,9 @@ config I2C_EG20T
config I2C_EXYNOS5
tristate "Exynos5 high-speed I2C driver"
depends on ARCH_EXYNOS5 && OF
+ default y
help
- Say Y here to include support for high-speed I2C controller in the
- Exynos5 based Samsung SoCs.
+ High-speed I2C controller on Exynos5 based Samsung SoCs.
config I2C_GPIO
tristate "GPIO-based bitbanging I2C"
@@ -700,16 +701,6 @@ config I2C_S3C2410
Say Y here to include support for I2C controller in the
Samsung SoCs.
-config I2C_S6000
- tristate "S6000 I2C support"
- depends on XTENSA_VARIANT_S6000
- help
- This driver supports the on chip I2C device on the
- S6000 xtensa processor family.
-
- To compile this driver as a module, choose M here. The module
- will be called i2c-s6000.
-
config I2C_SH7760
tristate "Renesas SH7760 I2C Controller"
depends on CPU_SUBTYPE_SH7760
@@ -1018,37 +1009,6 @@ config I2C_CROS_EC_TUNNEL
connected there. This will work whatever the interface used to
talk to the EC (SPI, I2C or LPC).
-config SCx200_I2C
- tristate "NatSemi SCx200 I2C using GPIO pins (DEPRECATED)"
- depends on SCx200_GPIO
- select I2C_ALGOBIT
- help
- Enable the use of two GPIO pins of a SCx200 processor as an I2C bus.
-
- If you don't know what to do here, say N.
-
- This support is also available as a module. If so, the module
- will be called scx200_i2c.
-
- This driver is deprecated and will be dropped soon. Use i2c-gpio
- (or scx200_acb) instead.
-
-config SCx200_I2C_SCL
- int "GPIO pin used for SCL"
- depends on SCx200_I2C
- default "12"
- help
- Enter the GPIO pin number used for the SCL signal. This value can
- also be specified with a module parameter.
-
-config SCx200_I2C_SDA
- int "GPIO pin used for SDA"
- depends on SCx200_I2C
- default "13"
- help
- Enter the GPIO pin number used for the SSA signal. This value can
- also be specified with a module parameter.
-
config SCx200_ACB
tristate "Geode ACCESS.bus support"
depends on X86_32 && PCI
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index dd9a7f8e873f..49bf07e5ef4d 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -68,7 +68,6 @@ obj-$(CONFIG_I2C_QUP) += i2c-qup.o
obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
-obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
@@ -101,6 +100,5 @@ obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
-obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index e95f9ba96790..79a68999a696 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -210,7 +210,7 @@ static void at91_twi_write_data_dma_callback(void *data)
struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
- dev->buf_len, DMA_MEM_TO_DEV);
+ dev->buf_len, DMA_TO_DEVICE);
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}
@@ -289,7 +289,7 @@ static void at91_twi_read_data_dma_callback(void *data)
struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
- dev->buf_len, DMA_DEV_TO_MEM);
+ dev->buf_len, DMA_FROM_DEVICE);
/* The last two bytes have to be read without using dma */
dev->buf += dev->buf_len - 2;
@@ -768,7 +768,7 @@ static int at91_twi_probe(struct platform_device *pdev)
snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
i2c_set_adapdata(&dev->adapter, dev);
dev->adapter.owner = THIS_MODULE;
- dev->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
+ dev->adapter.class = I2C_CLASS_DEPRECATED;
dev->adapter.algo = &at91_twi_algorithm;
dev->adapter.dev.parent = dev->dev;
dev->adapter.nr = pdev->id;
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 214ff9700efe..4b8ecd0b3661 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -277,7 +277,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
adap = &i2c_dev->adapter;
i2c_set_adapdata(adap, i2c_dev);
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
+ adap->class = I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "bcm2835 I2C adapter", sizeof(adap->name));
adap->algo = &bcm2835_i2c_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 3e271e7558d3..067c1615e968 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -648,7 +648,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
strlcpy(p_adap->name, pdev->name, sizeof(p_adap->name));
p_adap->algo = &bfin_twi_algorithm;
p_adap->algo_data = iface;
- p_adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
+ p_adap->class = I2C_CLASS_DEPRECATED;
p_adap->dev.parent = &pdev->dev;
p_adap->timeout = 5 * HZ;
p_adap->retries = 3;
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index 8e7a71487bb1..05e033c98115 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -183,6 +183,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
u8 *request = NULL;
u8 *response = NULL;
int result;
+ struct cros_ec_command msg;
request_len = ec_i2c_count_message(i2c_msgs, num);
if (request_len < 0) {
@@ -218,10 +219,16 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[],
}
ec_i2c_construct_message(request, i2c_msgs, num, bus_num);
- result = bus->ec->command_sendrecv(bus->ec, EC_CMD_I2C_PASSTHRU,
- request, request_len,
- response, response_len);
- if (result)
+
+ msg.version = 0;
+ msg.command = EC_CMD_I2C_PASSTHRU;
+ msg.outdata = request;
+ msg.outsize = request_len;
+ msg.indata = response;
+ msg.insize = response_len;
+
+ result = bus->ec->cmd_xfer(bus->ec, &msg);
+ if (result < 0)
goto exit;
result = ec_i2c_parse_response(response, i2c_msgs, &num);
@@ -258,7 +265,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
u32 remote_bus;
int err;
- if (!ec->command_sendrecv) {
+ if (!ec->cmd_xfer) {
dev_err(dev, "Missing sendrecv\n");
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 389bc68c55ad..4d9614719128 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -712,7 +712,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
+ adap->class = I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "DaVinci I2C adapter", sizeof(adap->name));
adap->algo = &i2c_davinci_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 3356f7ab9f79..d31d313ab4f7 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -188,6 +188,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.scl_sda_cfg = &hsw_config,
},
};
+
static struct i2c_algorithm i2c_dw_algo = {
.master_xfer = i2c_dw_xfer,
.functionality = i2c_dw_func,
@@ -350,6 +351,14 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c61), haswell },
{ PCI_VDEVICE(INTEL, 0x9c62), haswell },
+ /* Braswell / Cherrytrail */
+ { PCI_VDEVICE(INTEL, 0x22C1), baytrail },
+ { PCI_VDEVICE(INTEL, 0x22C2), baytrail },
+ { PCI_VDEVICE(INTEL, 0x22C3), baytrail },
+ { PCI_VDEVICE(INTEL, 0x22C4), baytrail },
+ { PCI_VDEVICE(INTEL, 0x22C5), baytrail },
+ { PCI_VDEVICE(INTEL, 0x22C6), baytrail },
+ { PCI_VDEVICE(INTEL, 0x22C7), baytrail },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 402ec3970fed..bc8773333155 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -106,6 +106,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "INT3432", 0 },
{ "INT3433", 0 },
{ "80860F41", 0 },
+ { "808622C1", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
@@ -202,7 +203,7 @@ static int dw_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
+ adap->class = I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
sizeof(adap->name));
adap->algo = &i2c_dw_algo;
diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c
index f7eccd682de9..10b8323b08d4 100644
--- a/drivers/i2c/busses/i2c-efm32.c
+++ b/drivers/i2c/busses/i2c-efm32.c
@@ -370,7 +370,13 @@ static int efm32_i2c_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(np, "efm32,location", &location);
+
+ ret = of_property_read_u32(np, "energymicro,location", &location);
+
+ if (ret)
+ /* fall back to wrongly namespaced property */
+ ret = of_property_read_u32(np, "efm32,location", &location);
+
if (!ret) {
dev_dbg(&pdev->dev, "using location %u\n", location);
} else {
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 63d229202854..28073f1d6d47 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -405,7 +405,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
int_status = readl(i2c->regs + HSI2C_INT_STATUS);
writel(int_status, i2c->regs + HSI2C_INT_STATUS);
- fifo_status = readl(i2c->regs + HSI2C_FIFO_STATUS);
/* handle interrupt related to the transfer status */
if (int_status & HSI2C_INT_I2C) {
@@ -526,7 +525,7 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
if (i2c->msg->flags & I2C_M_RD) {
i2c_ctl |= HSI2C_RXCHON;
- i2c_auto_conf = HSI2C_READ_WRITE;
+ i2c_auto_conf |= HSI2C_READ_WRITE;
trig_lvl = (i2c->msg->len > i2c->variant->fifo_depth) ?
(i2c->variant->fifo_depth * 3 / 4) : i2c->msg->len;
@@ -549,7 +548,6 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
writel(fifo_ctl, i2c->regs + HSI2C_FIFO_CTL);
writel(i2c_ctl, i2c->regs + HSI2C_CTL);
-
/*
* Enable interrupts before starting the transfer so that we don't
* miss any INT_I2C interrupts.
@@ -789,8 +787,16 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
}
#endif
-static SIMPLE_DEV_PM_OPS(exynos5_i2c_dev_pm_ops, exynos5_i2c_suspend_noirq,
- exynos5_i2c_resume_noirq);
+static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend_noirq = exynos5_i2c_suspend_noirq,
+ .resume_noirq = exynos5_i2c_resume_noirq,
+ .freeze_noirq = exynos5_i2c_suspend_noirq,
+ .thaw_noirq = exynos5_i2c_resume_noirq,
+ .poweroff_noirq = exynos5_i2c_suspend_noirq,
+ .restore_noirq = exynos5_i2c_resume_noirq,
+#endif
+};
static struct platform_driver exynos5_i2c_driver = {
.probe = exynos5_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 71a45b210a24..933f1e453e41 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -238,12 +238,10 @@ static int i2c_gpio_probe(struct platform_device *pdev)
static int i2c_gpio_remove(struct platform_device *pdev)
{
struct i2c_gpio_private_data *priv;
- struct i2c_gpio_platform_data *pdata;
struct i2c_adapter *adap;
priv = platform_get_drvdata(pdev);
adap = &priv->adap;
- pdata = &priv->pdata;
i2c_del_adapter(adap);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 6777cd6f8776..2994690b26e9 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -22,57 +22,58 @@
*/
/*
- Supports the following Intel I/O Controller Hubs (ICH):
-
- I/O Block I2C
- region SMBus Block proc. block
- Chip name PCI ID size PEC buffer call read
- ----------------------------------------------------------------------
- 82801AA (ICH) 0x2413 16 no no no no
- 82801AB (ICH0) 0x2423 16 no no no no
- 82801BA (ICH2) 0x2443 16 no no no no
- 82801CA (ICH3) 0x2483 32 soft no no no
- 82801DB (ICH4) 0x24c3 32 hard yes no no
- 82801E (ICH5) 0x24d3 32 hard yes yes yes
- 6300ESB 0x25a4 32 hard yes yes yes
- 82801F (ICH6) 0x266a 32 hard yes yes yes
- 6310ESB/6320ESB 0x269b 32 hard yes yes yes
- 82801G (ICH7) 0x27da 32 hard yes yes yes
- 82801H (ICH8) 0x283e 32 hard yes yes yes
- 82801I (ICH9) 0x2930 32 hard yes yes yes
- EP80579 (Tolapai) 0x5032 32 hard yes yes yes
- ICH10 0x3a30 32 hard yes yes yes
- ICH10 0x3a60 32 hard yes yes yes
- 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes
- 6 Series (PCH) 0x1c22 32 hard yes yes yes
- Patsburg (PCH) 0x1d22 32 hard yes yes yes
- Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes
- Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes
- Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes
- DH89xxCC (PCH) 0x2330 32 hard yes yes yes
- Panther Point (PCH) 0x1e22 32 hard yes yes yes
- Lynx Point (PCH) 0x8c22 32 hard yes yes yes
- Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes
- Avoton (SOC) 0x1f3c 32 hard yes yes yes
- Wellsburg (PCH) 0x8d22 32 hard yes yes yes
- Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes
- Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes
- Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
- Coleto Creek (PCH) 0x23b0 32 hard yes yes yes
- Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
- BayTrail (SOC) 0x0f12 32 hard yes yes yes
-
- Features supported by this driver:
- Software PEC no
- Hardware PEC yes
- Block buffer yes
- Block process call transaction no
- I2C block read transaction yes (doesn't use the block buffer)
- Slave mode no
- Interrupt processing yes
-
- See the file Documentation/i2c/busses/i2c-i801 for details.
-*/
+ * Supports the following Intel I/O Controller Hubs (ICH):
+ *
+ * I/O Block I2C
+ * region SMBus Block proc. block
+ * Chip name PCI ID size PEC buffer call read
+ * ---------------------------------------------------------------------------
+ * 82801AA (ICH) 0x2413 16 no no no no
+ * 82801AB (ICH0) 0x2423 16 no no no no
+ * 82801BA (ICH2) 0x2443 16 no no no no
+ * 82801CA (ICH3) 0x2483 32 soft no no no
+ * 82801DB (ICH4) 0x24c3 32 hard yes no no
+ * 82801E (ICH5) 0x24d3 32 hard yes yes yes
+ * 6300ESB 0x25a4 32 hard yes yes yes
+ * 82801F (ICH6) 0x266a 32 hard yes yes yes
+ * 6310ESB/6320ESB 0x269b 32 hard yes yes yes
+ * 82801G (ICH7) 0x27da 32 hard yes yes yes
+ * 82801H (ICH8) 0x283e 32 hard yes yes yes
+ * 82801I (ICH9) 0x2930 32 hard yes yes yes
+ * EP80579 (Tolapai) 0x5032 32 hard yes yes yes
+ * ICH10 0x3a30 32 hard yes yes yes
+ * ICH10 0x3a60 32 hard yes yes yes
+ * 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes
+ * 6 Series (PCH) 0x1c22 32 hard yes yes yes
+ * Patsburg (PCH) 0x1d22 32 hard yes yes yes
+ * Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes
+ * Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes
+ * Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes
+ * DH89xxCC (PCH) 0x2330 32 hard yes yes yes
+ * Panther Point (PCH) 0x1e22 32 hard yes yes yes
+ * Lynx Point (PCH) 0x8c22 32 hard yes yes yes
+ * Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes
+ * Avoton (SOC) 0x1f3c 32 hard yes yes yes
+ * Wellsburg (PCH) 0x8d22 32 hard yes yes yes
+ * Wellsburg (PCH) MS 0x8d7d 32 hard yes yes yes
+ * Wellsburg (PCH) MS 0x8d7e 32 hard yes yes yes
+ * Wellsburg (PCH) MS 0x8d7f 32 hard yes yes yes
+ * Coleto Creek (PCH) 0x23b0 32 hard yes yes yes
+ * Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes
+ * Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
+ * BayTrail (SOC) 0x0f12 32 hard yes yes yes
+ *
+ * Features supported by this driver:
+ * Software PEC no
+ * Hardware PEC yes
+ * Block buffer yes
+ * Block process call transaction no
+ * I2C block read transaction yes (doesn't use the block buffer)
+ * Slave mode no
+ * Interrupt processing yes
+ *
+ * See the file Documentation/i2c/busses/i2c-i801 for details.
+ */
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -162,24 +163,25 @@
STATUS_ERROR_FLAGS)
/* Older devices have their ID defined in <linux/pci_ids.h> */
-#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
-#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
+#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70
-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71
-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
-#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
-#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c
-#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
-#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0
-#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d
-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e
-#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
-#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72
+#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS 0x1e22
+#define PCI_DEVICE_ID_INTEL_AVOTON_SMBUS 0x1f3c
+#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330
+#define PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS 0x23b0
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS 0x8ca2
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS 0x8d22
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0 0x8d7d
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1 0x8d7e
+#define PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2 0x8d7f
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2
struct i801_mux_config {
@@ -823,6 +825,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
{ 0, }
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index aa8bc146718b..613069bc561a 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -735,10 +735,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
clk_disable_unprepare(i2c_imx->clk);
dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq);
- dev_dbg(&i2c_imx->adapter.dev, "device resources from 0x%x to 0x%x\n",
- res->start, res->end);
- dev_dbg(&i2c_imx->adapter.dev, "allocated %d bytes at 0x%x\n",
- resource_size(res), res->start);
+ dev_dbg(&i2c_imx->adapter.dev, "device resources: %pR\n", res);
dev_dbg(&i2c_imx->adapter.dev, "adapter name: \"%s\"\n",
i2c_imx->adapter.name);
dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 6a32aa095f83..0edf630b099a 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -341,8 +341,7 @@ static u32 mpc_i2c_get_sec_cfg_8xxx(void)
iounmap(reg);
}
}
- if (node)
- of_node_put(node);
+ of_node_put(node);
return val;
}
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 9f4b775e2e39..6dc5ded86f62 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -863,7 +863,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
drv_data->adapter.dev.parent = &pd->dev;
drv_data->adapter.algo = &mv64xxx_i2c_algo;
drv_data->adapter.owner = THIS_MODULE;
- drv_data->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
+ drv_data->adapter.class = I2C_CLASS_DEPRECATED;
drv_data->adapter.nr = pd->id;
drv_data->adapter.dev.of_node = pd->dev.of_node;
platform_set_drvdata(pd, drv_data);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 0e55d85fd4ed..9ad038d223c4 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -1032,10 +1032,10 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
adap = &dev->adap;
adap->dev.of_node = np;
adap->dev.parent = &adev->dev;
- adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
- adap->algo = &nmk_i2c_algo;
- adap->timeout = msecs_to_jiffies(dev->timeout);
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_DEPRECATED;
+ adap->algo = &nmk_i2c_algo;
+ adap->timeout = msecs_to_jiffies(dev->timeout);
snprintf(adap->name, sizeof(adap->name),
"Nomadik I2C at %pR", &adev->res);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 0e10cc6182f0..2a4fe0b7cfb7 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -239,15 +239,15 @@ static u32 ocores_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm ocores_algorithm = {
- .master_xfer = ocores_xfer,
- .functionality = ocores_func,
+ .master_xfer = ocores_xfer,
+ .functionality = ocores_func,
};
static struct i2c_adapter ocores_adapter = {
- .owner = THIS_MODULE,
- .name = "i2c-ocores",
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED,
- .algo = &ocores_algorithm,
+ .owner = THIS_MODULE,
+ .name = "i2c-ocores",
+ .class = I2C_CLASS_DEPRECATED,
+ .algo = &ocores_algorithm,
};
static const struct of_device_id ocores_i2c_match[] = {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b182793a4051..0dffb0e62c3b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1236,7 +1236,7 @@ omap_i2c_probe(struct platform_device *pdev)
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
+ adap->class = I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 2a5efb5b487c..3a4d64e1dfb1 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -633,13 +633,17 @@ static int qup_i2c_probe(struct platform_device *pdev)
* associated with each byte written/received
*/
size = QUP_OUTPUT_BLOCK_SIZE(io_mode);
- if (size >= ARRAY_SIZE(blk_sizes))
- return -EIO;
+ if (size >= ARRAY_SIZE(blk_sizes)) {
+ ret = -EIO;
+ goto fail;
+ }
qup->out_blk_sz = blk_sizes[size] / 2;
size = QUP_INPUT_BLOCK_SIZE(io_mode);
- if (size >= ARRAY_SIZE(blk_sizes))
- return -EIO;
+ if (size >= ARRAY_SIZE(blk_sizes)) {
+ ret = -EIO;
+ goto fail;
+ }
qup->in_blk_sz = blk_sizes[size] / 2;
size = QUP_OUTPUT_FIFO_SIZE(io_mode);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 899405923678..f3c7139dfa25 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -541,13 +541,13 @@ static int rcar_i2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
init_waitqueue_head(&priv->wait);
- adap = &priv->adap;
- adap->nr = pdev->id;
- adap->algo = &rcar_i2c_algo;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
- adap->retries = 3;
- adap->dev.parent = dev;
- adap->dev.of_node = dev->of_node;
+ adap = &priv->adap;
+ adap->nr = pdev->id;
+ adap->algo = &rcar_i2c_algo;
+ adap->class = I2C_CLASS_DEPRECATED;
+ adap->retries = 3;
+ adap->dev.parent = dev;
+ adap->dev.of_node = dev->of_node;
i2c_set_adapdata(adap, priv);
strlcpy(adap->name, pdev->name, sizeof(adap->name));
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index a9791509966a..69e11853e8bf 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -399,7 +399,7 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
}
/* is there anything left to handle? */
- if (unlikely(ipd == 0))
+ if (unlikely((ipd & REG_INT_ALL) == 0))
goto out;
switch (i2c->state) {
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index e828a1dba0e5..e086fb075f2b 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1128,11 +1128,11 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
s3c24xx_i2c_parse_dt(pdev->dev.of_node, i2c);
strlcpy(i2c->adap.name, "s3c2410-i2c", sizeof(i2c->adap.name));
- i2c->adap.owner = THIS_MODULE;
- i2c->adap.algo = &s3c24xx_i2c_algorithm;
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.algo = &s3c24xx_i2c_algorithm;
i2c->adap.retries = 2;
- i2c->adap.class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED;
- i2c->tx_setup = 50;
+ i2c->adap.class = I2C_CLASS_DEPRECATED;
+ i2c->tx_setup = 50;
init_waitqueue_head(&i2c->wait);
@@ -1267,7 +1267,7 @@ static int s3c24xx_i2c_suspend_noirq(struct device *dev)
return 0;
}
-static int s3c24xx_i2c_resume(struct device *dev)
+static int s3c24xx_i2c_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
@@ -1285,7 +1285,11 @@ static int s3c24xx_i2c_resume(struct device *dev)
static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend_noirq = s3c24xx_i2c_suspend_noirq,
- .resume = s3c24xx_i2c_resume,
+ .resume_noirq = s3c24xx_i2c_resume_noirq,
+ .freeze_noirq = s3c24xx_i2c_suspend_noirq,
+ .thaw_noirq = s3c24xx_i2c_resume_noirq,
+ .poweroff_noirq = s3c24xx_i2c_suspend_noirq,
+ .restore_noirq = s3c24xx_i2c_resume_noirq,
#endif
};
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
deleted file mode 100644
index dd186a037684..000000000000
--- a/drivers/i2c/busses/i2c-s6000.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * drivers/i2c/busses/i2c-s6000.c
- *
- * Description: Driver for S6000 Family I2C Interface
- * Copyright (c) 2008 emlix GmbH
- * Author: Oskar Schirmer <oskar@scara.com>
- *
- * Partially based on i2c-bfin-twi.c driver by <sonic.zhang@analog.com>
- * Copyright (c) 2005-2007 Analog Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/i2c/s6000.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "i2c-s6000.h"
-
-#define DRV_NAME "i2c-s6000"
-
-#define POLL_TIMEOUT (2 * HZ)
-
-struct s6i2c_if {
- u8 __iomem *reg; /* memory mapped registers */
- int irq;
- spinlock_t lock;
- struct i2c_msg *msgs; /* messages currently handled */
- int msgs_num; /* nb of msgs to do */
- int msgs_push; /* nb of msgs read/written */
- int msgs_done; /* nb of msgs finally handled */
- unsigned push; /* nb of bytes read/written in msg */
- unsigned done; /* nb of bytes finally handled */
- int timeout_count; /* timeout retries left */
- struct timer_list timeout_timer;
- struct i2c_adapter adap;
- struct completion complete;
- struct clk *clk;
- struct resource *res;
-};
-
-static inline u16 i2c_rd16(struct s6i2c_if *iface, unsigned n)
-{
- return readw(iface->reg + (n));
-}
-
-static inline void i2c_wr16(struct s6i2c_if *iface, unsigned n, u16 v)
-{
- writew(v, iface->reg + (n));
-}
-
-static inline u32 i2c_rd32(struct s6i2c_if *iface, unsigned n)
-{
- return readl(iface->reg + (n));
-}
-
-static inline void i2c_wr32(struct s6i2c_if *iface, unsigned n, u32 v)
-{
- writel(v, iface->reg + (n));
-}
-
-static struct s6i2c_if s6i2c_if;
-
-static void s6i2c_handle_interrupt(struct s6i2c_if *iface)
-{
- if (i2c_rd16(iface, S6_I2C_INTRSTAT) & (1 << S6_I2C_INTR_TXABRT)) {
- i2c_rd16(iface, S6_I2C_CLRTXABRT);
- i2c_wr16(iface, S6_I2C_INTRMASK, 0);
- complete(&iface->complete);
- return;
- }
- if (iface->msgs_done >= iface->msgs_num) {
- dev_err(&iface->adap.dev, "s6i2c: spurious I2C irq: %04x\n",
- i2c_rd16(iface, S6_I2C_INTRSTAT));
- i2c_wr16(iface, S6_I2C_INTRMASK, 0);
- return;
- }
- while ((iface->msgs_push < iface->msgs_num)
- && (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_TFNF))) {
- struct i2c_msg *m = &iface->msgs[iface->msgs_push];
- if (!(m->flags & I2C_M_RD))
- i2c_wr16(iface, S6_I2C_DATACMD, m->buf[iface->push]);
- else
- i2c_wr16(iface, S6_I2C_DATACMD,
- 1 << S6_I2C_DATACMD_READ);
- if (++iface->push >= m->len) {
- iface->push = 0;
- iface->msgs_push += 1;
- }
- }
- do {
- struct i2c_msg *m = &iface->msgs[iface->msgs_done];
- if (!(m->flags & I2C_M_RD)) {
- if (iface->msgs_done < iface->msgs_push)
- iface->msgs_done += 1;
- else
- break;
- } else if (i2c_rd16(iface, S6_I2C_STATUS)
- & (1 << S6_I2C_STATUS_RFNE)) {
- m->buf[iface->done] = i2c_rd16(iface, S6_I2C_DATACMD);
- if (++iface->done >= m->len) {
- iface->done = 0;
- iface->msgs_done += 1;
- }
- } else{
- break;
- }
- } while (iface->msgs_done < iface->msgs_num);
- if (iface->msgs_done >= iface->msgs_num) {
- i2c_wr16(iface, S6_I2C_INTRMASK, 1 << S6_I2C_INTR_TXABRT);
- complete(&iface->complete);
- } else if (iface->msgs_push >= iface->msgs_num) {
- i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXABRT) |
- (1 << S6_I2C_INTR_RXFULL));
- } else {
- i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXABRT) |
- (1 << S6_I2C_INTR_TXEMPTY) |
- (1 << S6_I2C_INTR_RXFULL));
- }
-}
-
-static irqreturn_t s6i2c_interrupt_entry(int irq, void *dev_id)
-{
- struct s6i2c_if *iface = dev_id;
- if (!(i2c_rd16(iface, S6_I2C_STATUS) & ((1 << S6_I2C_INTR_RXUNDER)
- | (1 << S6_I2C_INTR_RXOVER)
- | (1 << S6_I2C_INTR_RXFULL)
- | (1 << S6_I2C_INTR_TXOVER)
- | (1 << S6_I2C_INTR_TXEMPTY)
- | (1 << S6_I2C_INTR_RDREQ)
- | (1 << S6_I2C_INTR_TXABRT)
- | (1 << S6_I2C_INTR_RXDONE)
- | (1 << S6_I2C_INTR_ACTIVITY)
- | (1 << S6_I2C_INTR_STOPDET)
- | (1 << S6_I2C_INTR_STARTDET)
- | (1 << S6_I2C_INTR_GENCALL))))
- return IRQ_NONE;
-
- spin_lock(&iface->lock);
- del_timer(&iface->timeout_timer);
- s6i2c_handle_interrupt(iface);
- spin_unlock(&iface->lock);
- return IRQ_HANDLED;
-}
-
-static void s6i2c_timeout(unsigned long data)
-{
- struct s6i2c_if *iface = (struct s6i2c_if *)data;
- unsigned long flags;
-
- spin_lock_irqsave(&iface->lock, flags);
- s6i2c_handle_interrupt(iface);
- if (--iface->timeout_count > 0) {
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
- } else {
- complete(&iface->complete);
- i2c_wr16(iface, S6_I2C_INTRMASK, 0);
- }
- spin_unlock_irqrestore(&iface->lock, flags);
-}
-
-static int s6i2c_master_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs, int num)
-{
- struct s6i2c_if *iface = adap->algo_data;
- int i;
- if (num == 0)
- return 0;
- if (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY))
- yield();
- i2c_wr16(iface, S6_I2C_INTRMASK, 0);
- i2c_rd16(iface, S6_I2C_CLRINTR);
- for (i = 0; i < num; i++) {
- if (msgs[i].flags & I2C_M_TEN) {
- dev_err(&adap->dev,
- "s6i2c: 10 bits addr not supported\n");
- return -EINVAL;
- }
- if (msgs[i].len == 0) {
- dev_err(&adap->dev,
- "s6i2c: zero length message not supported\n");
- return -EINVAL;
- }
- if (msgs[i].addr != msgs[0].addr) {
- dev_err(&adap->dev,
- "s6i2c: multiple xfer cannot change target\n");
- return -EINVAL;
- }
- }
-
- iface->msgs = msgs;
- iface->msgs_num = num;
- iface->msgs_push = 0;
- iface->msgs_done = 0;
- iface->push = 0;
- iface->done = 0;
- iface->timeout_count = 10;
- i2c_wr16(iface, S6_I2C_TAR, msgs[0].addr);
- i2c_wr16(iface, S6_I2C_ENABLE, 1);
- i2c_wr16(iface, S6_I2C_INTRMASK, (1 << S6_I2C_INTR_TXEMPTY) |
- (1 << S6_I2C_INTR_TXABRT));
-
- iface->timeout_timer.expires = jiffies + POLL_TIMEOUT;
- add_timer(&iface->timeout_timer);
- wait_for_completion(&iface->complete);
- del_timer_sync(&iface->timeout_timer);
- while (i2c_rd32(iface, S6_I2C_TXFLR) > 0)
- schedule();
- while (i2c_rd16(iface, S6_I2C_STATUS) & (1 << S6_I2C_STATUS_ACTIVITY))
- schedule();
-
- i2c_wr16(iface, S6_I2C_INTRMASK, 0);
- i2c_wr16(iface, S6_I2C_ENABLE, 0);
- return iface->msgs_done;
-}
-
-static u32 s6i2c_functionality(struct i2c_adapter *adap)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static struct i2c_algorithm s6i2c_algorithm = {
- .master_xfer = s6i2c_master_xfer,
- .functionality = s6i2c_functionality,
-};
-
-static u16 nanoseconds_on_clk(struct s6i2c_if *iface, u32 ns)
-{
- u32 dividend = ((clk_get_rate(iface->clk) / 1000) * ns) / 1000000;
- if (dividend > 0xffff)
- return 0xffff;
- return dividend;
-}
-
-static int s6i2c_probe(struct platform_device *dev)
-{
- struct s6i2c_if *iface = &s6i2c_if;
- struct i2c_adapter *p_adap;
- const char *clock;
- int bus_num, rc;
- spin_lock_init(&iface->lock);
- init_completion(&iface->complete);
- iface->irq = platform_get_irq(dev, 0);
- if (iface->irq < 0) {
- rc = iface->irq;
- goto err_out;
- }
- iface->res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!iface->res) {
- rc = -ENXIO;
- goto err_out;
- }
- iface->res = request_mem_region(iface->res->start,
- resource_size(iface->res),
- dev->dev.bus_id);
- if (!iface->res) {
- rc = -EBUSY;
- goto err_out;
- }
- iface->reg = ioremap_nocache(iface->res->start,
- resource_size(iface->res));
- if (!iface->reg) {
- rc = -ENOMEM;
- goto err_reg;
- }
-
- clock = 0;
- bus_num = -1;
- if (dev_get_platdata(&dev->dev)) {
- struct s6_i2c_platform_data *pdata =
- dev_get_platdata(&dev->dev);
- bus_num = pdata->bus_num;
- clock = pdata->clock;
- }
- iface->clk = clk_get(&dev->dev, clock);
- if (IS_ERR(iface->clk)) {
- rc = PTR_ERR(iface->clk);
- goto err_map;
- }
- rc = clk_enable(iface->clk);
- if (rc < 0)
- goto err_clk_put;
- init_timer(&iface->timeout_timer);
- iface->timeout_timer.function = s6i2c_timeout;
- iface->timeout_timer.data = (unsigned long)iface;
-
- p_adap = &iface->adap;
- strlcpy(p_adap->name, dev->name, sizeof(p_adap->name));
- p_adap->algo = &s6i2c_algorithm;
- p_adap->algo_data = iface;
- p_adap->nr = bus_num;
- p_adap->class = 0;
- p_adap->dev.parent = &dev->dev;
- i2c_wr16(iface, S6_I2C_INTRMASK, 0);
- rc = request_irq(iface->irq, s6i2c_interrupt_entry,
- IRQF_SHARED, dev->name, iface);
- if (rc) {
- dev_err(&p_adap->dev, "s6i2c: can't get IRQ %d\n", iface->irq);
- goto err_clk_dis;
- }
-
- i2c_wr16(iface, S6_I2C_ENABLE, 0);
- udelay(1);
- i2c_wr32(iface, S6_I2C_SRESET, 1 << S6_I2C_SRESET_IC_SRST);
- i2c_wr16(iface, S6_I2C_CLRTXABRT, 1);
- i2c_wr16(iface, S6_I2C_CON,
- (1 << S6_I2C_CON_MASTER) |
- (S6_I2C_CON_SPEED_NORMAL << S6_I2C_CON_SPEED) |
- (0 << S6_I2C_CON_10BITSLAVE) |
- (0 << S6_I2C_CON_10BITMASTER) |
- (1 << S6_I2C_CON_RESTARTENA) |
- (1 << S6_I2C_CON_SLAVEDISABLE));
- i2c_wr16(iface, S6_I2C_SSHCNT, nanoseconds_on_clk(iface, 4000));
- i2c_wr16(iface, S6_I2C_SSLCNT, nanoseconds_on_clk(iface, 4700));
- i2c_wr16(iface, S6_I2C_FSHCNT, nanoseconds_on_clk(iface, 600));
- i2c_wr16(iface, S6_I2C_FSLCNT, nanoseconds_on_clk(iface, 1300));
- i2c_wr16(iface, S6_I2C_RXTL, 0);
- i2c_wr16(iface, S6_I2C_TXTL, 0);
-
- platform_set_drvdata(dev, iface);
- rc = i2c_add_numbered_adapter(p_adap);
- if (rc)
- goto err_irq_free;
- return 0;
-
-err_irq_free:
- free_irq(iface->irq, iface);
-err_clk_dis:
- clk_disable(iface->clk);
-err_clk_put:
- clk_put(iface->clk);
-err_map:
- iounmap(iface->reg);
-err_reg:
- release_mem_region(iface->res->start,
- resource_size(iface->res));
-err_out:
- return rc;
-}
-
-static int s6i2c_remove(struct platform_device *pdev)
-{
- struct s6i2c_if *iface = platform_get_drvdata(pdev);
- i2c_wr16(iface, S6_I2C_ENABLE, 0);
- i2c_del_adapter(&iface->adap);
- free_irq(iface->irq, iface);
- clk_disable(iface->clk);
- clk_put(iface->clk);
- iounmap(iface->reg);
- release_mem_region(iface->res->start,
- resource_size(iface->res));
- return 0;
-}
-
-static struct platform_driver s6i2c_driver = {
- .probe = s6i2c_probe,
- .remove = s6i2c_remove,
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s6i2c_init(void)
-{
- pr_info("I2C: S6000 I2C driver\n");
- return platform_driver_register(&s6i2c_driver);
-}
-
-static void __exit s6i2c_exit(void)
-{
- platform_driver_unregister(&s6i2c_driver);
-}
-
-MODULE_DESCRIPTION("I2C-Bus adapter routines for S6000 I2C");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
-
-subsys_initcall(s6i2c_init);
-module_exit(s6i2c_exit);
diff --git a/drivers/i2c/busses/i2c-s6000.h b/drivers/i2c/busses/i2c-s6000.h
deleted file mode 100644
index 4936f9f2256f..000000000000
--- a/drivers/i2c/busses/i2c-s6000.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * drivers/i2c/busses/i2c-s6000.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2008 Emlix GmbH <info@emlix.com>
- * Author: Oskar Schirmer <oskar@scara.com>
- */
-
-#ifndef __DRIVERS_I2C_BUSSES_I2C_S6000_H
-#define __DRIVERS_I2C_BUSSES_I2C_S6000_H
-
-#define S6_I2C_CON 0x000
-#define S6_I2C_CON_MASTER 0
-#define S6_I2C_CON_SPEED 1
-#define S6_I2C_CON_SPEED_NORMAL 1
-#define S6_I2C_CON_SPEED_FAST 2
-#define S6_I2C_CON_SPEED_MASK 3
-#define S6_I2C_CON_10BITSLAVE 3
-#define S6_I2C_CON_10BITMASTER 4
-#define S6_I2C_CON_RESTARTENA 5
-#define S6_I2C_CON_SLAVEDISABLE 6
-#define S6_I2C_TAR 0x004
-#define S6_I2C_TAR_GCORSTART 10
-#define S6_I2C_TAR_SPECIAL 11
-#define S6_I2C_SAR 0x008
-#define S6_I2C_HSMADDR 0x00C
-#define S6_I2C_DATACMD 0x010
-#define S6_I2C_DATACMD_READ 8
-#define S6_I2C_SSHCNT 0x014
-#define S6_I2C_SSLCNT 0x018
-#define S6_I2C_FSHCNT 0x01C
-#define S6_I2C_FSLCNT 0x020
-#define S6_I2C_INTRSTAT 0x02C
-#define S6_I2C_INTRMASK 0x030
-#define S6_I2C_RAWINTR 0x034
-#define S6_I2C_INTR_RXUNDER 0
-#define S6_I2C_INTR_RXOVER 1
-#define S6_I2C_INTR_RXFULL 2
-#define S6_I2C_INTR_TXOVER 3
-#define S6_I2C_INTR_TXEMPTY 4
-#define S6_I2C_INTR_RDREQ 5
-#define S6_I2C_INTR_TXABRT 6
-#define S6_I2C_INTR_RXDONE 7
-#define S6_I2C_INTR_ACTIVITY 8
-#define S6_I2C_INTR_STOPDET 9
-#define S6_I2C_INTR_STARTDET 10
-#define S6_I2C_INTR_GENCALL 11
-#define S6_I2C_RXTL 0x038
-#define S6_I2C_TXTL 0x03C
-#define S6_I2C_CLRINTR 0x040
-#define S6_I2C_CLRRXUNDER 0x044
-#define S6_I2C_CLRRXOVER 0x048
-#define S6_I2C_CLRTXOVER 0x04C
-#define S6_I2C_CLRRDREQ 0x050
-#define S6_I2C_CLRTXABRT 0x054
-#define S6_I2C_CLRRXDONE 0x058
-#define S6_I2C_CLRACTIVITY 0x05C
-#define S6_I2C_CLRSTOPDET 0x060
-#define S6_I2C_CLRSTARTDET 0x064
-#define S6_I2C_CLRGENCALL 0x068
-#define S6_I2C_ENABLE 0x06C
-#define S6_I2C_STATUS 0x070
-#define S6_I2C_STATUS_ACTIVITY 0
-#define S6_I2C_STATUS_TFNF 1
-#define S6_I2C_STATUS_TFE 2
-#define S6_I2C_STATUS_RFNE 3
-#define S6_I2C_STATUS_RFF 4
-#define S6_I2C_TXFLR 0x074
-#define S6_I2C_RXFLR 0x078
-#define S6_I2C_SRESET 0x07C
-#define S6_I2C_SRESET_IC_SRST 0
-#define S6_I2C_SRESET_IC_MASTER_SRST 1
-#define S6_I2C_SRESET_IC_SLAVE_SRST 2
-#define S6_I2C_TXABRTSOURCE 0x080
-
-#endif
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index a3216defc1d3..b1336d5f0531 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -311,7 +311,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
goto out;
}
adap = &siic->adapter;
- adap->class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
+ adap->class = I2C_CLASS_DEPRECATED;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
siic->base = devm_ioremap_resource(&pdev->dev, mem_res);
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 95b947670386..2e4eccd6599a 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -206,25 +206,31 @@ static inline void st_i2c_clr_bits(void __iomem *reg, u32 mask)
writel_relaxed(readl_relaxed(reg) & ~mask, reg);
}
-/* From I2C Specifications v0.5 */
+/*
+ * From I2C Specifications v0.5.
+ *
+ * All the values below have +10% margin added to be
+ * compatible with some out-of-spec devices,
+ * like the HDMI link of the Toshiba 19AV600 TV.
+ */
static struct st_i2c_timings i2c_timings[] = {
[I2C_MODE_STANDARD] = {
.rate = 100000,
- .rep_start_hold = 4000,
- .rep_start_setup = 4700,
- .start_hold = 4000,
- .data_setup_time = 250,
- .stop_setup_time = 4000,
- .bus_free_time = 4700,
+ .rep_start_hold = 4400,
+ .rep_start_setup = 5170,
+ .start_hold = 4400,
+ .data_setup_time = 275,
+ .stop_setup_time = 4400,
+ .bus_free_time = 5170,
},
[I2C_MODE_FAST] = {
.rate = 400000,
- .rep_start_hold = 600,
- .rep_start_setup = 600,
- .start_hold = 600,
- .data_setup_time = 100,
- .stop_setup_time = 600,
- .bus_free_time = 1300,
+ .rep_start_hold = 660,
+ .rep_start_setup = 660,
+ .start_hold = 660,
+ .data_setup_time = 110,
+ .stop_setup_time = 660,
+ .bus_free_time = 1430,
},
};
@@ -815,7 +821,7 @@ static int st_i2c_probe(struct platform_device *pdev)
adap = &i2c_dev->adap;
i2c_set_adapdata(adap, i2c_dev);
- snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%x)", res->start);
+ snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start);
adap->owner = THIS_MODULE;
adap->timeout = 2 * HZ;
adap->retries = 0;
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index fefb1c19ec1d..6a44f37798c8 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -909,7 +909,7 @@ static int stu300_probe(struct platform_device *pdev)
adap = &dev->adapter;
adap->owner = THIS_MODULE;
/* DDC class but actually often used for more generic I2C */
- adap->class = I2C_CLASS_DDC | I2C_CLASS_DEPRECATED;
+ adap->class = I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
sizeof(adap->name));
adap->nr = bus_nr;
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 057602683553..10855a0b7e7f 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -311,19 +311,8 @@ static struct serio_driver taos_drv = {
.interrupt = taos_interrupt,
};
-static int __init taos_init(void)
-{
- return serio_register_driver(&taos_drv);
-}
-
-static void __exit taos_exit(void)
-{
- serio_unregister_driver(&taos_drv);
-}
+module_serio_driver(taos_drv);
MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("TAOS evaluation module driver");
MODULE_LICENSE("GPL");
-
-module_init(taos_init);
-module_exit(taos_exit);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index f1bb2fc06791..87d0371cebb7 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -792,7 +792,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
i2c_dev->adapter.owner = THIS_MODULE;
- i2c_dev->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_DEPRECATED;
+ i2c_dev->adapter.class = I2C_CLASS_DEPRECATED;
strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter",
sizeof(i2c_dev->adapter.name));
i2c_dev->adapter.algo = &tegra_i2c_algo;
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 7731f1795869..ade9223912d3 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -677,15 +677,15 @@ static u32 xiic_func(struct i2c_adapter *adap)
}
static const struct i2c_algorithm xiic_algorithm = {
- .master_xfer = xiic_xfer,
- .functionality = xiic_func,
+ .master_xfer = xiic_xfer,
+ .functionality = xiic_func,
};
static struct i2c_adapter xiic_adapter = {
- .owner = THIS_MODULE,
- .name = DRIVER_NAME,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD | I2C_CLASS_DEPRECATED,
- .algo = &xiic_algorithm,
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .class = I2C_CLASS_DEPRECATED,
+ .algo = &xiic_algorithm,
};
diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c
deleted file mode 100644
index 8eadf0f47ad7..000000000000
--- a/drivers/i2c/busses/scx200_i2c.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/* linux/drivers/i2c/busses/scx200_i2c.c
-
- Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
-
- National Semiconductor SCx200 I2C bus on GPIO pins
-
- Based on i2c-velleman.c Copyright (C) 1995-96, 2000 Simon G. Vogl
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include <linux/io.h>
-
-#include <linux/scx200_gpio.h>
-
-MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
-MODULE_DESCRIPTION("NatSemi SCx200 I2C Driver");
-MODULE_LICENSE("GPL");
-
-static int scl = CONFIG_SCx200_I2C_SCL;
-static int sda = CONFIG_SCx200_I2C_SDA;
-
-module_param(scl, int, 0);
-MODULE_PARM_DESC(scl, "GPIO line for SCL");
-module_param(sda, int, 0);
-MODULE_PARM_DESC(sda, "GPIO line for SDA");
-
-static void scx200_i2c_setscl(void *data, int state)
-{
- scx200_gpio_set(scl, state);
-}
-
-static void scx200_i2c_setsda(void *data, int state)
-{
- scx200_gpio_set(sda, state);
-}
-
-static int scx200_i2c_getscl(void *data)
-{
- return scx200_gpio_get(scl);
-}
-
-static int scx200_i2c_getsda(void *data)
-{
- return scx200_gpio_get(sda);
-}
-
-/* ------------------------------------------------------------------------
- * Encapsulate the above functions in the correct operations structure.
- * This is only done when more than one hardware adapter is supported.
- */
-
-static struct i2c_algo_bit_data scx200_i2c_data = {
- .setsda = scx200_i2c_setsda,
- .setscl = scx200_i2c_setscl,
- .getsda = scx200_i2c_getsda,
- .getscl = scx200_i2c_getscl,
- .udelay = 10,
- .timeout = HZ,
-};
-
-static struct i2c_adapter scx200_i2c_ops = {
- .owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
- .algo_data = &scx200_i2c_data,
- .name = "NatSemi SCx200 I2C",
-};
-
-static int scx200_i2c_init(void)
-{
- pr_debug("NatSemi SCx200 I2C Driver\n");
-
- if (!scx200_gpio_present()) {
- pr_err("no SCx200 gpio pins available\n");
- return -ENODEV;
- }
-
- pr_debug("SCL=GPIO%02u, SDA=GPIO%02u\n", scl, sda);
-
- if (scl == -1 || sda == -1 || scl == sda) {
- pr_err("scl and sda must be specified\n");
- return -EINVAL;
- }
-
- /* Configure GPIOs as open collector outputs */
- scx200_gpio_configure(scl, ~2, 5);
- scx200_gpio_configure(sda, ~2, 5);
-
- if (i2c_bit_add_bus(&scx200_i2c_ops) < 0) {
- pr_err("adapter %s registration failed\n", scx200_i2c_ops.name);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void scx200_i2c_cleanup(void)
-{
- i2c_del_adapter(&scx200_i2c_ops);
-}
-
-module_init(scx200_i2c_init);
-module_exit(scx200_i2c_cleanup);
-
-/*
- Local variables:
- compile-command: "make -k -C ../.. SUBDIRS=drivers/i2c modules"
- c-basic-offset: 8
- End:
-*/
diff --git a/drivers/i2c/i2c-acpi.c b/drivers/i2c/i2c-acpi.c
new file mode 100644
index 000000000000..e8b61967334b
--- /dev/null
+++ b/drivers/i2c/i2c-acpi.c
@@ -0,0 +1,362 @@
+/*
+ * I2C ACPI code
+ *
+ * Copyright (C) 2014 Intel Corp
+ *
+ * Author: Lan Tianyu <tianyu.lan@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#define pr_fmt(fmt) "I2C/ACPI : " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+
+struct acpi_i2c_handler_data {
+ struct acpi_connection_info info;
+ struct i2c_adapter *adapter;
+};
+
+struct gsb_buffer {
+ u8 status;
+ u8 len;
+ union {
+ u16 wdata;
+ u8 bdata;
+ u8 data[0];
+ };
+} __packed;
+
+static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct i2c_board_info *info = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_i2c_serialbus *sb;
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ info->addr = sb->slave_address;
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ info->flags |= I2C_CLIENT_TEN;
+ }
+ } else if (info->irq < 0) {
+ struct resource r;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &r))
+ info->irq = r.start;
+ }
+
+ /* Tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_adapter *adapter = data;
+ struct list_head resource_list;
+ struct i2c_board_info info;
+ struct acpi_device *adev;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ memset(&info, 0, sizeof(info));
+ info.acpi_node.companion = adev;
+ info.irq = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_i2c_add_resource, &info);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0 || !info.addr)
+ return AE_OK;
+
+ adev->power.flags.ignore_parent = true;
+ strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
+ if (!i2c_new_device(adapter, &info)) {
+ adev->power.flags.ignore_parent = false;
+ dev_err(&adapter->dev,
+ "failed to add I2C device %s from ACPI\n",
+ dev_name(&adev->dev));
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * @adap: pointer to adapter
+ *
+ * Enumerate all I2C slave devices behind this adapter by walking the ACPI
+ * namespace. When a device is found it will be added to the Linux device
+ * model and bound to the corresponding ACPI handle.
+ */
+void acpi_i2c_register_devices(struct i2c_adapter *adap)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ if (!adap->dev.parent)
+ return;
+
+ handle = ACPI_HANDLE(adap->dev.parent);
+ if (!handle)
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_i2c_add_device, NULL,
+ adap, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
+}
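To make the enumeration concrete, this is roughly the i2c_board_info that the walk above would hand to i2c_new_device() for a slave whose _CRS contains an I2cSerialBus descriptor plus an Interrupt resource. It is an illustrative sketch only; the device name, address and IRQ number are made up, not taken from the patch.

	/* Hypothetical result of acpi_i2c_add_device() for a 7-bit slave */
	struct i2c_board_info example = {
		.type  = "INTABCD:00",	/* dev_name() of the ACPI companion (made up) */
		.addr  = 0x1d,		/* I2cSerialBus SlaveAddress */
		.flags = 0,		/* I2C_CLIENT_TEN only in 10-bit mode */
		.irq   = 67,		/* first Interrupt resource, if present */
	};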
+
+static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
+ u8 cmd, u8 *data, u8 data_len)
+{
+
+ struct i2c_msg msgs[2];
+ int ret;
+ u8 *buffer;
+
+ buffer = kzalloc(data_len, GFP_KERNEL);
+ if (!buffer)
+ return AE_NO_MEMORY;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags;
+ msgs[0].len = 1;
+ msgs[0].buf = &cmd;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = client->flags | I2C_M_RD;
+ msgs[1].len = data_len;
+ msgs[1].buf = buffer;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ dev_err(&client->adapter->dev, "i2c read failed\n");
+ else
+ memcpy(data, buffer, data_len);
+
+ kfree(buffer);
+ return ret;
+}
+
+static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
+ u8 cmd, u8 *data, u8 data_len)
+{
+
+ struct i2c_msg msgs[1];
+ u8 *buffer;
+ int ret = AE_OK;
+
+ buffer = kzalloc(data_len + 1, GFP_KERNEL);
+ if (!buffer)
+ return AE_NO_MEMORY;
+
+ buffer[0] = cmd;
+ memcpy(buffer + 1, data, data_len);
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags;
+ msgs[0].len = data_len + 1;
+ msgs[0].buf = buffer;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ dev_err(&client->adapter->dev, "i2c write failed\n");
+
+ kfree(buffer);
+ return ret;
+}
+
+static acpi_status
+acpi_i2c_space_handler(u32 function, acpi_physical_address command,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
+ struct acpi_i2c_handler_data *data = handler_context;
+ struct acpi_connection_info *info = &data->info;
+ struct acpi_resource_i2c_serialbus *sb;
+ struct i2c_adapter *adapter = data->adapter;
+ struct i2c_client client;
+ struct acpi_resource *ares;
+ u32 accessor_type = function >> 16;
+ u8 action = function & ACPI_IO_MASK;
+ acpi_status ret = AE_OK;
+ int status;
+
+ ret = acpi_buffer_to_resource(info->connection, info->length, &ares);
+ if (ACPI_FAILURE(ret))
+ return ret;
+
+ if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ memset(&client, 0, sizeof(client));
+ client.adapter = adapter;
+ client.addr = sb->slave_address;
+ client.flags = 0;
+
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ client.flags |= I2C_CLIENT_TEN;
+
+ switch (accessor_type) {
+ case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV:
+ if (action == ACPI_READ) {
+ status = i2c_smbus_read_byte(&client);
+ if (status >= 0) {
+ gsb->bdata = status;
+ status = 0;
+ }
+ } else {
+ status = i2c_smbus_write_byte(&client, gsb->bdata);
+ }
+ break;
+
+ case ACPI_GSB_ACCESS_ATTRIB_BYTE:
+ if (action == ACPI_READ) {
+ status = i2c_smbus_read_byte_data(&client, command);
+ if (status >= 0) {
+ gsb->bdata = status;
+ status = 0;
+ }
+ } else {
+ status = i2c_smbus_write_byte_data(&client, command,
+ gsb->bdata);
+ }
+ break;
+
+ case ACPI_GSB_ACCESS_ATTRIB_WORD:
+ if (action == ACPI_READ) {
+ status = i2c_smbus_read_word_data(&client, command);
+ if (status >= 0) {
+ gsb->wdata = status;
+ status = 0;
+ }
+ } else {
+ status = i2c_smbus_write_word_data(&client, command,
+ gsb->wdata);
+ }
+ break;
+
+ case ACPI_GSB_ACCESS_ATTRIB_BLOCK:
+ if (action == ACPI_READ) {
+ status = i2c_smbus_read_block_data(&client, command,
+ gsb->data);
+ if (status >= 0) {
+ gsb->len = status;
+ status = 0;
+ }
+ } else {
+ status = i2c_smbus_write_block_data(&client, command,
+ gsb->len, gsb->data);
+ }
+ break;
+
+ case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE:
+ if (action == ACPI_READ) {
+ status = acpi_gsb_i2c_read_bytes(&client, command,
+ gsb->data, info->access_length);
+ if (status > 0)
+ status = 0;
+ } else {
+ status = acpi_gsb_i2c_write_bytes(&client, command,
+ gsb->data, info->access_length);
+ }
+ break;
+
+ default:
+ pr_info("protocol(0x%02x) is not supported.\n", accessor_type);
+ ret = AE_BAD_PARAMETER;
+ goto err;
+ }
+
+ gsb->status = status;
+
+ err:
+ ACPI_FREE(ares);
+ return ret;
+}
+
+
+int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+{
+ acpi_handle handle = ACPI_HANDLE(adapter->dev.parent);
+ struct acpi_i2c_handler_data *data;
+ acpi_status status;
+
+ if (!handle)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(struct acpi_i2c_handler_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->adapter = adapter;
+ status = acpi_bus_attach_private_data(handle, (void *)data);
+ if (ACPI_FAILURE(status)) {
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ status = acpi_install_address_space_handler(handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &acpi_i2c_space_handler,
+ NULL,
+ data);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&adapter->dev, "Error installing i2c space handler\n");
+ acpi_bus_detach_private_data(handle);
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+{
+ acpi_handle handle = ACPI_HANDLE(adapter->dev.parent);
+ struct acpi_i2c_handler_data *data;
+ acpi_status status;
+
+ if (!handle)
+ return;
+
+ acpi_remove_address_space_handler(handle,
+ ACPI_ADR_SPACE_GSBUS,
+ &acpi_i2c_space_handler);
+
+ status = acpi_bus_get_private_data(handle, (void **)&data);
+ if (ACPI_SUCCESS(status))
+ kfree(data);
+
+ acpi_bus_detach_private_data(handle);
+}
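The three entry points added in this file (acpi_i2c_register_devices(), acpi_i2c_install_space_handler() and acpi_i2c_remove_space_handler()) are called from i2c-core.c below, but the hunks shown do not include their declarations. A minimal sketch of what the core would need, with no-op fallbacks when ACPI support is compiled out, could look like the following; the exact header and guard symbol are assumptions, not taken from this excerpt.

#if IS_ENABLED(CONFIG_ACPI)	/* guard symbol assumed, not shown in the patch */
void acpi_i2c_register_devices(struct i2c_adapter *adap);
int acpi_i2c_install_space_handler(struct i2c_adapter *adapter);
void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter);
#else
static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
{
	return 0;
}
static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter) { }
#endif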
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 66aa83b99383..632057a44615 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1097,101 +1097,6 @@ EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
static void of_i2c_register_devices(struct i2c_adapter *adap) { }
#endif /* CONFIG_OF */
-/* ACPI support code */
-
-#if IS_ENABLED(CONFIG_ACPI)
-static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
-{
- struct i2c_board_info *info = data;
-
- if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- struct acpi_resource_i2c_serialbus *sb;
-
- sb = &ares->data.i2c_serial_bus;
- if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
- info->addr = sb->slave_address;
- if (sb->access_mode == ACPI_I2C_10BIT_MODE)
- info->flags |= I2C_CLIENT_TEN;
- }
- } else if (info->irq < 0) {
- struct resource r;
-
- if (acpi_dev_resource_interrupt(ares, 0, &r))
- info->irq = r.start;
- }
-
- /* Tell the ACPI core to skip this resource */
- return 1;
-}
-
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
-{
- struct i2c_adapter *adapter = data;
- struct list_head resource_list;
- struct i2c_board_info info;
- struct acpi_device *adev;
- int ret;
-
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
- return AE_OK;
-
- memset(&info, 0, sizeof(info));
- info.acpi_node.companion = adev;
- info.irq = -1;
-
- INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_i2c_add_resource, &info);
- acpi_dev_free_resource_list(&resource_list);
-
- if (ret < 0 || !info.addr)
- return AE_OK;
-
- adev->power.flags.ignore_parent = true;
- strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
- if (!i2c_new_device(adapter, &info)) {
- adev->power.flags.ignore_parent = false;
- dev_err(&adapter->dev,
- "failed to add I2C device %s from ACPI\n",
- dev_name(&adev->dev));
- }
-
- return AE_OK;
-}
-
-/**
- * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
- * @adap: pointer to adapter
- *
- * Enumerate all I2C slave devices behind this adapter by walking the ACPI
- * namespace. When a device is found it will be added to the Linux device
- * model and bound to the corresponding ACPI handle.
- */
-static void acpi_i2c_register_devices(struct i2c_adapter *adap)
-{
- acpi_handle handle;
- acpi_status status;
-
- if (!adap->dev.parent)
- return;
-
- handle = ACPI_HANDLE(adap->dev.parent);
- if (!handle)
- return;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
- acpi_i2c_add_device, NULL,
- adap, NULL);
- if (ACPI_FAILURE(status))
- dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
-}
-#else
-static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) {}
-#endif /* CONFIG_ACPI */
-
static int i2c_do_add_adapter(struct i2c_driver *driver,
struct i2c_adapter *adap)
{
@@ -1298,6 +1203,7 @@ exit_recovery:
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
acpi_i2c_register_devices(adap);
+ acpi_i2c_install_space_handler(adap);
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
@@ -1471,6 +1377,7 @@ void i2c_del_adapter(struct i2c_adapter *adap)
return;
}
+ acpi_i2c_remove_space_handler(adap);
/* Tell drivers about this removal */
mutex_lock(&core_lock);
bus_for_each_drv(&i2c_bus_type, NULL, adap,
@@ -2013,6 +1920,16 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
if (!driver->detect || !address_list)
return 0;
+	/* Warn that the adapter lost class-based instantiation */
+ if (adapter->class == I2C_CLASS_DEPRECATED) {
+ dev_dbg(&adapter->dev,
+ "This adapter dropped support for I2C classes and "
+ "won't auto-detect %s devices anymore. If you need it, check "
+ "'Documentation/i2c/instantiating-devices' for alternatives.\n",
+ driver->driver.name);
+ return 0;
+ }
+
/* Stop here if the classes do not match */
if (!(adapter->class & driver->class))
return 0;
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index 77e4849d2f2a..d241aa295d96 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -2,7 +2,7 @@
i2c-stub.c - I2C/SMBus chip emulator
Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
- Copyright (C) 2007, 2012 Jean Delvare <jdelvare@suse.de>
+ Copyright (C) 2007-2014 Jean Delvare <jdelvare@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -27,28 +27,109 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/i2c.h>
+#include <linux/list.h>
#define MAX_CHIPS 10
-#define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \
- I2C_FUNC_SMBUS_I2C_BLOCK)
+
+/*
+ * Support for I2C_FUNC_SMBUS_BLOCK_DATA is disabled by default and must
+ * be enabled explicitly by setting the I2C_FUNC_SMBUS_BLOCK_DATA bits
+ * in the 'functionality' module parameter.
+ */
+#define STUB_FUNC_DEFAULT \
+ (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
+
+#define STUB_FUNC_ALL \
+ (STUB_FUNC_DEFAULT | I2C_FUNC_SMBUS_BLOCK_DATA)
static unsigned short chip_addr[MAX_CHIPS];
module_param_array(chip_addr, ushort, NULL, S_IRUGO);
MODULE_PARM_DESC(chip_addr,
"Chip addresses (up to 10, between 0x03 and 0x77)");
-static unsigned long functionality = STUB_FUNC;
+static unsigned long functionality = STUB_FUNC_DEFAULT;
module_param(functionality, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(functionality, "Override functionality bitfield");
+/* Some chips have banked register ranges */
+
+static u8 bank_reg[MAX_CHIPS];
+module_param_array(bank_reg, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bank_reg, "Bank register");
+
+static u8 bank_mask[MAX_CHIPS];
+module_param_array(bank_mask, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bank_mask, "Bank value mask");
+
+static u8 bank_start[MAX_CHIPS];
+module_param_array(bank_start, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bank_start, "First banked register");
+
+static u8 bank_end[MAX_CHIPS];
+module_param_array(bank_end, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bank_end, "Last banked register");
+
+struct smbus_block_data {
+ struct list_head node;
+ u8 command;
+ u8 len;
+ u8 block[I2C_SMBUS_BLOCK_MAX];
+};
+
struct stub_chip {
u8 pointer;
u16 words[256]; /* Byte operations use the LSB as per SMBus
specification */
+ struct list_head smbus_blocks;
+
+ /* For chips with banks, extra registers are allocated dynamically */
+ u8 bank_reg;
+ u8 bank_shift;
+ u8 bank_mask;
+ u8 bank_sel; /* Currently selected bank */
+ u8 bank_start;
+ u8 bank_end;
+ u16 bank_size;
+ u16 *bank_words; /* Room for bank_mask * bank_size registers */
};
static struct stub_chip *stub_chips;
+static int stub_chips_nr;
+
+static struct smbus_block_data *stub_find_block(struct device *dev,
+ struct stub_chip *chip,
+ u8 command, bool create)
+{
+ struct smbus_block_data *b, *rb = NULL;
+
+ list_for_each_entry(b, &chip->smbus_blocks, node) {
+ if (b->command == command) {
+ rb = b;
+ break;
+ }
+ }
+ if (rb == NULL && create) {
+ rb = devm_kzalloc(dev, sizeof(*rb), GFP_KERNEL);
+ if (rb == NULL)
+ return rb;
+ rb->command = command;
+ list_add(&rb->node, &chip->smbus_blocks);
+ }
+ return rb;
+}
+
+static u16 *stub_get_wordp(struct stub_chip *chip, u8 offset)
+{
+ if (chip->bank_sel &&
+ offset >= chip->bank_start && offset <= chip->bank_end)
+ return chip->bank_words +
+ (chip->bank_sel - 1) * chip->bank_size +
+ offset - chip->bank_start;
+ else
+ return chip->words + offset;
+}
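A worked example of the lookup above, with hypothetical parameters: for bank_start = 0x40, bank_end = 0x4f (so bank_size = 16) and bank_sel = 2, an access to register 0x43 resolves to bank_words[(2 - 1) * 16 + 0x43 - 0x40] = bank_words[19]; with bank_sel = 0, or for any offset outside 0x40..0x4f, the access falls through to the ordinary words[offset] storage as before.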
/* Return negative errno on error. */
static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
@@ -57,9 +138,11 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
s32 ret;
int i, len;
struct stub_chip *chip = NULL;
+ struct smbus_block_data *b;
+ u16 *wordp;
/* Search for the right chip */
- for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
+ for (i = 0; i < stub_chips_nr; i++) {
if (addr == chip_addr[i]) {
chip = stub_chips + i;
break;
@@ -82,7 +165,8 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
"smbus byte - addr 0x%02x, wrote 0x%02x.\n",
addr, command);
} else {
- data->byte = chip->words[chip->pointer++] & 0xff;
+ wordp = stub_get_wordp(chip, chip->pointer++);
+ data->byte = *wordp & 0xff;
dev_dbg(&adap->dev,
"smbus byte - addr 0x%02x, read 0x%02x.\n",
addr, data->byte);
@@ -92,14 +176,25 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
break;
case I2C_SMBUS_BYTE_DATA:
+ wordp = stub_get_wordp(chip, command);
if (read_write == I2C_SMBUS_WRITE) {
- chip->words[command] &= 0xff00;
- chip->words[command] |= data->byte;
+ *wordp &= 0xff00;
+ *wordp |= data->byte;
dev_dbg(&adap->dev,
"smbus byte data - addr 0x%02x, wrote 0x%02x at 0x%02x.\n",
addr, data->byte, command);
+
+ /* Set the bank as needed */
+ if (chip->bank_words && command == chip->bank_reg) {
+ chip->bank_sel =
+ (data->byte >> chip->bank_shift)
+ & chip->bank_mask;
+ dev_dbg(&adap->dev,
+ "switching to bank %u.\n",
+ chip->bank_sel);
+ }
} else {
- data->byte = chip->words[command] & 0xff;
+ data->byte = *wordp & 0xff;
dev_dbg(&adap->dev,
"smbus byte data - addr 0x%02x, read 0x%02x at 0x%02x.\n",
addr, data->byte, command);
@@ -110,13 +205,14 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
break;
case I2C_SMBUS_WORD_DATA:
+ wordp = stub_get_wordp(chip, command);
if (read_write == I2C_SMBUS_WRITE) {
- chip->words[command] = data->word;
+ *wordp = data->word;
dev_dbg(&adap->dev,
"smbus word data - addr 0x%02x, wrote 0x%04x at 0x%02x.\n",
addr, data->word, command);
} else {
- data->word = chip->words[command];
+ data->word = *wordp;
dev_dbg(&adap->dev,
"smbus word data - addr 0x%02x, read 0x%04x at 0x%02x.\n",
addr, data->word, command);
@@ -126,6 +222,12 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
+ /*
+ * We ignore banks here, because banked chips don't use I2C
+ * block transfers
+ */
+ if (data->block[0] > 256 - command) /* Avoid overrun */
+ data->block[0] = 256 - command;
len = data->block[0];
if (read_write == I2C_SMBUS_WRITE) {
for (i = 0; i < len; i++) {
@@ -148,6 +250,55 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
ret = 0;
break;
+ case I2C_SMBUS_BLOCK_DATA:
+ /*
+ * We ignore banks here, because chips typically don't use both
+ * banks and SMBus block transfers
+ */
+ b = stub_find_block(&adap->dev, chip, command, false);
+ if (read_write == I2C_SMBUS_WRITE) {
+ len = data->block[0];
+ if (len == 0 || len > I2C_SMBUS_BLOCK_MAX) {
+ ret = -EINVAL;
+ break;
+ }
+ if (b == NULL) {
+ b = stub_find_block(&adap->dev, chip, command,
+ true);
+ if (b == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ /* Largest write sets read block length */
+ if (len > b->len)
+ b->len = len;
+ for (i = 0; i < len; i++)
+ b->block[i] = data->block[i + 1];
+ /* update for byte and word commands */
+ chip->words[command] = (b->block[0] << 8) | b->len;
+ dev_dbg(&adap->dev,
+ "smbus block data - addr 0x%02x, wrote %d bytes at 0x%02x.\n",
+ addr, len, command);
+ } else {
+ if (b == NULL) {
+ dev_dbg(&adap->dev,
+ "SMBus block read command without prior block write not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ len = b->len;
+ data->block[0] = len;
+ for (i = 0; i < len; i++)
+ data->block[i + 1] = b->block[i];
+ dev_dbg(&adap->dev,
+ "smbus block data - addr 0x%02x, read %d bytes at 0x%02x.\n",
+ addr, len, command);
+ }
+
+ ret = 0;
+ break;
+
default:
dev_dbg(&adap->dev, "Unsupported I2C/SMBus command\n");
ret = -EOPNOTSUPP;
@@ -159,7 +310,7 @@ static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
static u32 stub_func(struct i2c_adapter *adapter)
{
- return STUB_FUNC & functionality;
+ return STUB_FUNC_ALL & functionality;
}
static const struct i2c_algorithm smbus_algorithm = {
@@ -174,6 +325,43 @@ static struct i2c_adapter stub_adapter = {
.name = "SMBus stub driver",
};
+static int __init i2c_stub_allocate_banks(int i)
+{
+ struct stub_chip *chip = stub_chips + i;
+
+ chip->bank_reg = bank_reg[i];
+ chip->bank_start = bank_start[i];
+ chip->bank_end = bank_end[i];
+ chip->bank_size = bank_end[i] - bank_start[i] + 1;
+
+ /* We assume that all bits in the mask are contiguous */
+ chip->bank_mask = bank_mask[i];
+ while (!(chip->bank_mask & 1)) {
+ chip->bank_shift++;
+ chip->bank_mask >>= 1;
+ }
+
+ chip->bank_words = kzalloc(chip->bank_mask * chip->bank_size *
+ sizeof(u16), GFP_KERNEL);
+ if (!chip->bank_words)
+ return -ENOMEM;
+
+ pr_debug("i2c-stub: Allocated %u banks of %u words each (registers 0x%02x to 0x%02x)\n",
+ chip->bank_mask, chip->bank_size, chip->bank_start,
+ chip->bank_end);
+
+ return 0;
+}
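To illustrate the mask handling with made-up module parameters: bank_reg = 0x4e, bank_mask = 0x30, bank_start = 0x50, bank_end = 0x5f yield bank_size = 16, and the loop reduces the mask to bank_shift = 4 with an in-register mask of 0x3, so a byte written to register 0x4e selects bank (value >> 4) & 0x3. Storage for 3 * 16 extra 16-bit words is allocated; bank 0 (bank_sel == 0) keeps using the plain words[] array via stub_get_wordp().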
+
+static void i2c_stub_free(void)
+{
+ int i;
+
+ for (i = 0; i < stub_chips_nr; i++)
+ kfree(stub_chips[i].bank_words);
+ kfree(stub_chips);
+}
+
static int __init i2c_stub_init(void)
{
int i, ret;
@@ -194,22 +382,39 @@ static int __init i2c_stub_init(void)
}
/* Allocate memory for all chips at once */
- stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL);
+ stub_chips_nr = i;
+ stub_chips = kcalloc(stub_chips_nr, sizeof(struct stub_chip),
+ GFP_KERNEL);
if (!stub_chips) {
pr_err("i2c-stub: Out of memory\n");
return -ENOMEM;
}
+ for (i = 0; i < stub_chips_nr; i++) {
+ INIT_LIST_HEAD(&stub_chips[i].smbus_blocks);
+
+ /* Allocate extra memory for banked register ranges */
+ if (bank_mask[i]) {
+ ret = i2c_stub_allocate_banks(i);
+ if (ret)
+ goto fail_free;
+ }
+ }
ret = i2c_add_adapter(&stub_adapter);
if (ret)
- kfree(stub_chips);
+ goto fail_free;
+
+ return 0;
+
+ fail_free:
+ i2c_stub_free();
return ret;
}
static void __exit i2c_stub_exit(void)
{
i2c_del_adapter(&stub_adapter);
- kfree(stub_chips);
+ i2c_stub_free();
}
MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 9bd4212782ab..ec11b404b433 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -41,6 +41,7 @@
#include <linux/i2c-mux.h>
#include <linux/i2c/pca954x.h>
#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#define PCA954X_MAX_NCHANS 8
@@ -273,9 +274,23 @@ static int pca954x_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int pca954x_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pca954x *data = i2c_get_clientdata(client);
+
+ data->last_chan = 0;
+ return i2c_smbus_write_byte(client, 0);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pca954x_pm, NULL, pca954x_resume);
+
static struct i2c_driver pca954x_driver = {
.driver = {
.name = "pca954x",
+ .pm = &pca954x_pm,
.owner = THIS_MODULE,
},
.probe = pca954x_probe,
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 00400c352c1a..766a71ccefed 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -604,16 +604,14 @@ static int c2_up(struct net_device *netdev)
tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
c2_port->mem_size = tx_size + rx_size;
- c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
- &c2_port->dma);
+ c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
+ &c2_port->dma);
if (c2_port->mem == NULL) {
pr_debug("Unable to allocate memory for "
"host descriptor rings\n");
return -ENOMEM;
}
- memset(c2_port->mem, 0, c2_port->mem_size);
-
/* Create the Rx host descriptor ring */
if ((ret =
c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 90200245c5eb..02120d340d50 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1003,13 +1003,13 @@ int nes_init_cqp(struct nes_device *nesdev)
(sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) +
sizeof(struct nes_hw_cqp_qp_context);
- nesdev->cqp_vbase = pci_alloc_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
- &nesdev->cqp_pbase);
+ nesdev->cqp_vbase = pci_zalloc_consistent(nesdev->pcidev,
+ nesdev->cqp_mem_size,
+ &nesdev->cqp_pbase);
if (!nesdev->cqp_vbase) {
nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n");
return -ENOMEM;
}
- memset(nesdev->cqp_vbase, 0, nesdev->cqp_mem_size);
/* Allocate a twice the number of CQP requests as the SQ size */
nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
@@ -1691,13 +1691,13 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
(NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) +
sizeof(struct nes_hw_nic_qp_context);
- nesvnic->nic_vbase = pci_alloc_consistent(nesdev->pcidev, nesvnic->nic_mem_size,
- &nesvnic->nic_pbase);
+ nesvnic->nic_vbase = pci_zalloc_consistent(nesdev->pcidev,
+ nesvnic->nic_mem_size,
+ &nesvnic->nic_pbase);
if (!nesvnic->nic_vbase) {
nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n");
return -ENOMEM;
}
- memset(nesvnic->nic_vbase, 0, nesvnic->nic_mem_size);
nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n",
nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 218dd3574285..fef067c959fc 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1616,8 +1616,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
entries, nescq->cq_mem_size, nescq->hw_cq.cq_number);
/* allocate the physical buffer space */
- mem = pci_alloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
- &nescq->hw_cq.cq_pbase);
+ mem = pci_zalloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
+ &nescq->hw_cq.cq_pbase);
if (!mem) {
printk(KERN_ERR PFX "Unable to allocate pci memory for cq\n");
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
@@ -1625,7 +1625,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
return ERR_PTR(-ENOMEM);
}
- memset(mem, 0, nescq->cq_mem_size);
nescq->hw_cq.cq_vbase = mem;
nescq->hw_cq.cq_head = 0;
nes_debug(NES_DBG_CQ, "CQ%u virtual address @ %p, phys = 0x%08X\n",
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index f7e79b481349..a3958c63d7d5 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -665,4 +665,14 @@ config KEYBOARD_CROS_EC
To compile this driver as a module, choose M here: the
module will be called cros_ec_keyb.
+config KEYBOARD_CAP1106
+ tristate "Microchip CAP1106 touch sensor"
+ depends on OF && I2C
+ select REGMAP_I2C
+ help
+ Say Y here to enable the CAP1106 touch sensor driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cap1106.
+
endif
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 7504ae19049d..0a3345634d79 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
obj-$(CONFIG_KEYBOARD_ATKBD) += atkbd.o
obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
+obj-$(CONFIG_KEYBOARD_CAP1106) += cap1106.o
obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o
obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
diff --git a/drivers/input/keyboard/cap1106.c b/drivers/input/keyboard/cap1106.c
new file mode 100644
index 000000000000..f7d7a0d4ab4e
--- /dev/null
+++ b/drivers/input/keyboard/cap1106.c
@@ -0,0 +1,335 @@
+/*
+ * Input driver for Microchip CAP1106, 6 channel capacitive touch sensor
+ *
+ * http://www.microchip.com/wwwproducts/Devices.aspx?product=CAP1106
+ *
+ * (c) 2014 Daniel Mack <linux@zonque.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/gpio/consumer.h>
+
+#define CAP1106_REG_MAIN_CONTROL 0x00
+#define CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT (6)
+#define CAP1106_REG_MAIN_CONTROL_GAIN_MASK (0xc0)
+#define CAP1106_REG_MAIN_CONTROL_DLSEEP BIT(4)
+#define CAP1106_REG_GENERAL_STATUS 0x02
+#define CAP1106_REG_SENSOR_INPUT 0x03
+#define CAP1106_REG_NOISE_FLAG_STATUS 0x0a
+#define CAP1106_REG_SENOR_DELTA(X) (0x10 + (X))
+#define CAP1106_REG_SENSITIVITY_CONTROL 0x1f
+#define CAP1106_REG_CONFIG 0x20
+#define CAP1106_REG_SENSOR_ENABLE 0x21
+#define CAP1106_REG_SENSOR_CONFIG 0x22
+#define CAP1106_REG_SENSOR_CONFIG2 0x23
+#define CAP1106_REG_SAMPLING_CONFIG 0x24
+#define CAP1106_REG_CALIBRATION 0x25
+#define CAP1106_REG_INT_ENABLE 0x26
+#define CAP1106_REG_REPEAT_RATE 0x28
+#define CAP1106_REG_MT_CONFIG 0x2a
+#define CAP1106_REG_MT_PATTERN_CONFIG 0x2b
+#define CAP1106_REG_MT_PATTERN 0x2d
+#define CAP1106_REG_RECALIB_CONFIG 0x2f
+#define CAP1106_REG_SENSOR_THRESH(X) (0x30 + (X))
+#define CAP1106_REG_SENSOR_NOISE_THRESH 0x38
+#define CAP1106_REG_STANDBY_CHANNEL 0x40
+#define CAP1106_REG_STANDBY_CONFIG 0x41
+#define CAP1106_REG_STANDBY_SENSITIVITY 0x42
+#define CAP1106_REG_STANDBY_THRESH 0x43
+#define CAP1106_REG_CONFIG2 0x44
+#define CAP1106_REG_SENSOR_BASE_CNT(X) (0x50 + (X))
+#define CAP1106_REG_SENSOR_CALIB(X)	(0xb1 + (X))
+#define CAP1106_REG_SENSOR_CALIB_LSB1 0xb9
+#define CAP1106_REG_SENSOR_CALIB_LSB2 0xba
+#define CAP1106_REG_PRODUCT_ID 0xfd
+#define CAP1106_REG_MANUFACTURER_ID 0xfe
+#define CAP1106_REG_REVISION 0xff
+
+#define CAP1106_NUM_CHN 6
+#define CAP1106_PRODUCT_ID 0x55
+#define CAP1106_MANUFACTURER_ID 0x5d
+
+struct cap1106_priv {
+ struct regmap *regmap;
+ struct input_dev *idev;
+
+ /* config */
+ unsigned int keycodes[CAP1106_NUM_CHN];
+};
+
+static const struct reg_default cap1106_reg_defaults[] = {
+ { CAP1106_REG_MAIN_CONTROL, 0x00 },
+ { CAP1106_REG_GENERAL_STATUS, 0x00 },
+ { CAP1106_REG_SENSOR_INPUT, 0x00 },
+ { CAP1106_REG_NOISE_FLAG_STATUS, 0x00 },
+ { CAP1106_REG_SENSITIVITY_CONTROL, 0x2f },
+ { CAP1106_REG_CONFIG, 0x20 },
+ { CAP1106_REG_SENSOR_ENABLE, 0x3f },
+ { CAP1106_REG_SENSOR_CONFIG, 0xa4 },
+ { CAP1106_REG_SENSOR_CONFIG2, 0x07 },
+ { CAP1106_REG_SAMPLING_CONFIG, 0x39 },
+ { CAP1106_REG_CALIBRATION, 0x00 },
+ { CAP1106_REG_INT_ENABLE, 0x3f },
+ { CAP1106_REG_REPEAT_RATE, 0x3f },
+ { CAP1106_REG_MT_CONFIG, 0x80 },
+ { CAP1106_REG_MT_PATTERN_CONFIG, 0x00 },
+ { CAP1106_REG_MT_PATTERN, 0x3f },
+ { CAP1106_REG_RECALIB_CONFIG, 0x8a },
+ { CAP1106_REG_SENSOR_THRESH(0), 0x40 },
+ { CAP1106_REG_SENSOR_THRESH(1), 0x40 },
+ { CAP1106_REG_SENSOR_THRESH(2), 0x40 },
+ { CAP1106_REG_SENSOR_THRESH(3), 0x40 },
+ { CAP1106_REG_SENSOR_THRESH(4), 0x40 },
+ { CAP1106_REG_SENSOR_THRESH(5), 0x40 },
+ { CAP1106_REG_SENSOR_NOISE_THRESH, 0x01 },
+ { CAP1106_REG_STANDBY_CHANNEL, 0x00 },
+ { CAP1106_REG_STANDBY_CONFIG, 0x39 },
+ { CAP1106_REG_STANDBY_SENSITIVITY, 0x02 },
+ { CAP1106_REG_STANDBY_THRESH, 0x40 },
+ { CAP1106_REG_CONFIG2, 0x40 },
+ { CAP1106_REG_SENSOR_CALIB_LSB1, 0x00 },
+ { CAP1106_REG_SENSOR_CALIB_LSB2, 0x00 },
+};
+
+static bool cap1106_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case CAP1106_REG_MAIN_CONTROL:
+ case CAP1106_REG_SENSOR_INPUT:
+ case CAP1106_REG_SENOR_DELTA(0):
+ case CAP1106_REG_SENOR_DELTA(1):
+ case CAP1106_REG_SENOR_DELTA(2):
+ case CAP1106_REG_SENOR_DELTA(3):
+ case CAP1106_REG_SENOR_DELTA(4):
+ case CAP1106_REG_SENOR_DELTA(5):
+ case CAP1106_REG_PRODUCT_ID:
+ case CAP1106_REG_MANUFACTURER_ID:
+ case CAP1106_REG_REVISION:
+ return true;
+ }
+
+ return false;
+}
+
+static const struct regmap_config cap1106_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CAP1106_REG_REVISION,
+ .reg_defaults = cap1106_reg_defaults,
+
+ .num_reg_defaults = ARRAY_SIZE(cap1106_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = cap1106_volatile_reg,
+};
+
+static irqreturn_t cap1106_thread_func(int irq_num, void *data)
+{
+ struct cap1106_priv *priv = data;
+ unsigned int status;
+ int ret, i;
+
+ /*
+ * Deassert interrupt. This needs to be done before reading the status
+ * registers, which will not carry valid values otherwise.
+ */
+ ret = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL, 1, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = regmap_read(priv->regmap, CAP1106_REG_SENSOR_INPUT, &status);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < CAP1106_NUM_CHN; i++)
+ input_report_key(priv->idev, priv->keycodes[i],
+ status & (1 << i));
+
+ input_sync(priv->idev);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int cap1106_set_sleep(struct cap1106_priv *priv, bool sleep)
+{
+ return regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL,
+ CAP1106_REG_MAIN_CONTROL_DLSEEP,
+ sleep ? CAP1106_REG_MAIN_CONTROL_DLSEEP : 0);
+}
+
+static int cap1106_input_open(struct input_dev *idev)
+{
+ struct cap1106_priv *priv = input_get_drvdata(idev);
+
+ return cap1106_set_sleep(priv, false);
+}
+
+static void cap1106_input_close(struct input_dev *idev)
+{
+ struct cap1106_priv *priv = input_get_drvdata(idev);
+
+ cap1106_set_sleep(priv, true);
+}
+
+static int cap1106_i2c_probe(struct i2c_client *i2c_client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c_client->dev;
+ struct cap1106_priv *priv;
+ struct device_node *node;
+ int i, error, irq, gain = 0;
+ unsigned int val, rev;
+ u32 gain32, keycodes[CAP1106_NUM_CHN];
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init_i2c(i2c_client, &cap1106_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ error = regmap_read(priv->regmap, CAP1106_REG_PRODUCT_ID, &val);
+ if (error)
+ return error;
+
+ if (val != CAP1106_PRODUCT_ID) {
+ dev_err(dev, "Product ID: Got 0x%02x, expected 0x%02x\n",
+ val, CAP1106_PRODUCT_ID);
+ return -ENODEV;
+ }
+
+ error = regmap_read(priv->regmap, CAP1106_REG_MANUFACTURER_ID, &val);
+ if (error)
+ return error;
+
+ if (val != CAP1106_MANUFACTURER_ID) {
+ dev_err(dev, "Manufacturer ID: Got 0x%02x, expected 0x%02x\n",
+ val, CAP1106_MANUFACTURER_ID);
+ return -ENODEV;
+ }
+
+ error = regmap_read(priv->regmap, CAP1106_REG_REVISION, &rev);
+ if (error < 0)
+ return error;
+
+ dev_info(dev, "CAP1106 detected, revision 0x%02x\n", rev);
+ i2c_set_clientdata(i2c_client, priv);
+ node = dev->of_node;
+
+ if (!of_property_read_u32(node, "microchip,sensor-gain", &gain32)) {
+ if (is_power_of_2(gain32) && gain32 <= 8)
+ gain = ilog2(gain32);
+ else
+ dev_err(dev, "Invalid sensor-gain value %d\n", gain32);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(keycodes) != ARRAY_SIZE(priv->keycodes));
+
+ /* Provide some useful defaults */
+ for (i = 0; i < ARRAY_SIZE(keycodes); i++)
+ keycodes[i] = KEY_A + i;
+
+ of_property_read_u32_array(node, "linux,keycodes",
+ keycodes, ARRAY_SIZE(keycodes));
+
+ for (i = 0; i < ARRAY_SIZE(keycodes); i++)
+ priv->keycodes[i] = keycodes[i];
+
+ error = regmap_update_bits(priv->regmap, CAP1106_REG_MAIN_CONTROL,
+ CAP1106_REG_MAIN_CONTROL_GAIN_MASK,
+ gain << CAP1106_REG_MAIN_CONTROL_GAIN_SHIFT);
+ if (error)
+ return error;
+
+ /* Disable autorepeat. The Linux input system has its own handling. */
+ error = regmap_write(priv->regmap, CAP1106_REG_REPEAT_RATE, 0);
+ if (error)
+ return error;
+
+ priv->idev = devm_input_allocate_device(dev);
+ if (!priv->idev)
+ return -ENOMEM;
+
+ priv->idev->name = "CAP1106 capacitive touch sensor";
+ priv->idev->id.bustype = BUS_I2C;
+ priv->idev->evbit[0] = BIT_MASK(EV_KEY);
+
+ if (of_property_read_bool(node, "autorepeat"))
+ __set_bit(EV_REP, priv->idev->evbit);
+
+ for (i = 0; i < CAP1106_NUM_CHN; i++)
+ __set_bit(priv->keycodes[i], priv->idev->keybit);
+
+ priv->idev->id.vendor = CAP1106_MANUFACTURER_ID;
+ priv->idev->id.product = CAP1106_PRODUCT_ID;
+ priv->idev->id.version = rev;
+
+ priv->idev->open = cap1106_input_open;
+ priv->idev->close = cap1106_input_close;
+
+ input_set_drvdata(priv->idev, priv);
+
+ /*
+ * Put the device in deep sleep mode for now.
+	 * ->open() will bring it back once it is actually needed.
+ */
+ cap1106_set_sleep(priv, true);
+
+ error = input_register_device(priv->idev);
+ if (error)
+ return error;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+ dev_err(dev, "Unable to parse or map IRQ\n");
+ return -ENXIO;
+ }
+
+ error = devm_request_threaded_irq(dev, irq, NULL, cap1106_thread_func,
+ IRQF_ONESHOT, dev_name(dev), priv);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct of_device_id cap1106_dt_ids[] = {
+ { .compatible = "microchip,cap1106", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cap1106_dt_ids);
+
+static const struct i2c_device_id cap1106_i2c_ids[] = {
+ { "cap1106", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, cap1106_i2c_ids);
+
+static struct i2c_driver cap1106_i2c_driver = {
+ .driver = {
+ .name = "cap1106",
+ .owner = THIS_MODULE,
+ .of_match_table = cap1106_dt_ids,
+ },
+ .id_table = cap1106_i2c_ids,
+ .probe = cap1106_i2c_probe,
+};
+
+module_i2c_driver(cap1106_i2c_driver);
+
+MODULE_ALIAS("platform:cap1106");
+MODULE_DESCRIPTION("Microchip CAP1106 driver");
+MODULE_AUTHOR("Daniel Mack <linux@zonque.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 408379669d3c..791781ade4e7 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -24,8 +24,8 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/input/matrix_keypad.h>
@@ -42,7 +42,6 @@
* @dev: Device pointer
* @idev: Input device
* @ec: Top level ChromeOS device to use to talk to EC
- * @event_notifier: interrupt event notifier for transport devices
*/
struct cros_ec_keyb {
unsigned int rows;
@@ -55,7 +54,6 @@ struct cros_ec_keyb {
struct device *dev;
struct input_dev *idev;
struct cros_ec_device *ec;
- struct notifier_block notifier;
};
@@ -173,41 +171,55 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
input_sync(ckdev->idev);
}
-static int cros_ec_keyb_open(struct input_dev *dev)
-{
- struct cros_ec_keyb *ckdev = input_get_drvdata(dev);
-
- return blocking_notifier_chain_register(&ckdev->ec->event_notifier,
- &ckdev->notifier);
-}
-
-static void cros_ec_keyb_close(struct input_dev *dev)
-{
- struct cros_ec_keyb *ckdev = input_get_drvdata(dev);
-
- blocking_notifier_chain_unregister(&ckdev->ec->event_notifier,
- &ckdev->notifier);
-}
-
static int cros_ec_keyb_get_state(struct cros_ec_keyb *ckdev, uint8_t *kb_state)
{
- return ckdev->ec->command_recv(ckdev->ec, EC_CMD_MKBP_STATE,
- kb_state, ckdev->cols);
+ struct cros_ec_command msg = {
+ .version = 0,
+ .command = EC_CMD_MKBP_STATE,
+ .outdata = NULL,
+ .outsize = 0,
+ .indata = kb_state,
+ .insize = ckdev->cols,
+ };
+
+ return ckdev->ec->cmd_xfer(ckdev->ec, &msg);
}
-static int cros_ec_keyb_work(struct notifier_block *nb,
- unsigned long state, void *_notify)
+static irqreturn_t cros_ec_keyb_irq(int irq, void *data)
{
+ struct cros_ec_keyb *ckdev = data;
+ struct cros_ec_device *ec = ckdev->ec;
int ret;
- struct cros_ec_keyb *ckdev = container_of(nb, struct cros_ec_keyb,
- notifier);
uint8_t kb_state[ckdev->cols];
+ if (device_may_wakeup(ec->dev))
+ pm_wakeup_event(ec->dev, 0);
+
ret = cros_ec_keyb_get_state(ckdev, kb_state);
if (ret >= 0)
cros_ec_keyb_process(ckdev, kb_state, ret);
+ else
+ dev_err(ec->dev, "failed to get keyboard state: %d\n", ret);
- return NOTIFY_DONE;
+ return IRQ_HANDLED;
+}
+
+static int cros_ec_keyb_open(struct input_dev *dev)
+{
+ struct cros_ec_keyb *ckdev = input_get_drvdata(dev);
+ struct cros_ec_device *ec = ckdev->ec;
+
+ return request_threaded_irq(ec->irq, NULL, cros_ec_keyb_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "cros_ec_keyb", ckdev);
+}
+
+static void cros_ec_keyb_close(struct input_dev *dev)
+{
+ struct cros_ec_keyb *ckdev = input_get_drvdata(dev);
+ struct cros_ec_device *ec = ckdev->ec;
+
+ free_irq(ec->irq, ckdev);
}
static int cros_ec_keyb_probe(struct platform_device *pdev)
@@ -238,8 +250,12 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
if (!idev)
return -ENOMEM;
+ if (!ec->irq) {
+ dev_err(dev, "no EC IRQ specified\n");
+ return -EINVAL;
+ }
+
ckdev->ec = ec;
- ckdev->notifier.notifier_call = cros_ec_keyb_work;
ckdev->dev = dev;
dev_set_drvdata(&pdev->dev, ckdev);
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index 8280cb16260b..20a99c368d16 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -531,8 +531,7 @@ static int imx_keypad_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int imx_kbd_suspend(struct device *dev)
+static int __maybe_unused imx_kbd_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct imx_keypad *kbd = platform_get_drvdata(pdev);
@@ -552,7 +551,7 @@ static int imx_kbd_suspend(struct device *dev)
return 0;
}
-static int imx_kbd_resume(struct device *dev)
+static int __maybe_unused imx_kbd_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct imx_keypad *kbd = platform_get_drvdata(pdev);
@@ -575,7 +574,6 @@ err_clk:
return ret;
}
-#endif
static SIMPLE_DEV_PM_OPS(imx_kbd_pm_ops, imx_kbd_suspend, imx_kbd_resume);
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 0b42118cbf8f..cb32e2b506b7 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -558,6 +558,12 @@ static ssize_t lm8323_pwm_store_time(struct device *dev,
}
static DEVICE_ATTR(time, 0644, lm8323_pwm_show_time, lm8323_pwm_store_time);
+static struct attribute *lm8323_pwm_attrs[] = {
+ &dev_attr_time.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(lm8323_pwm);
+
static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev,
const char *name)
{
@@ -580,16 +586,11 @@ static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev,
if (name) {
pwm->cdev.name = name;
pwm->cdev.brightness_set = lm8323_pwm_set_brightness;
+ pwm->cdev.groups = lm8323_pwm_groups;
if (led_classdev_register(dev, &pwm->cdev) < 0) {
dev_err(dev, "couldn't register PWM %d\n", id);
return -1;
}
- if (device_create_file(pwm->cdev.dev,
- &dev_attr_time) < 0) {
- dev_err(dev, "couldn't register time attribute\n");
- led_classdev_unregister(&pwm->cdev);
- return -1;
- }
pwm->enabled = true;
}
@@ -753,11 +754,8 @@ fail3:
device_remove_file(&client->dev, &dev_attr_disable_kp);
fail2:
while (--pwm >= 0)
- if (lm->pwm[pwm].enabled) {
- device_remove_file(lm->pwm[pwm].cdev.dev,
- &dev_attr_time);
+ if (lm->pwm[pwm].enabled)
led_classdev_unregister(&lm->pwm[pwm].cdev);
- }
fail1:
input_free_device(idev);
kfree(lm);
@@ -777,10 +775,8 @@ static int lm8323_remove(struct i2c_client *client)
device_remove_file(&lm->client->dev, &dev_attr_disable_kp);
for (i = 0; i < 3; i++)
- if (lm->pwm[i].enabled) {
- device_remove_file(lm->pwm[i].cdev.dev, &dev_attr_time);
+ if (lm->pwm[i].enabled)
led_classdev_unregister(&lm->pwm[i].cdev);
- }
kfree(lm);
diff --git a/drivers/input/keyboard/max7359_keypad.c b/drivers/input/keyboard/max7359_keypad.c
index 430b54539720..faa6da53eba8 100644
--- a/drivers/input/keyboard/max7359_keypad.c
+++ b/drivers/input/keyboard/max7359_keypad.c
@@ -203,12 +203,17 @@ static int max7359_probe(struct i2c_client *client,
dev_dbg(&client->dev, "keys FIFO is 0x%02x\n", ret);
- keypad = kzalloc(sizeof(struct max7359_keypad), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!keypad || !input_dev) {
+ keypad = devm_kzalloc(&client->dev, sizeof(struct max7359_keypad),
+ GFP_KERNEL);
+ if (!keypad) {
dev_err(&client->dev, "failed to allocate memory\n");
- error = -ENOMEM;
- goto failed_free_mem;
+ return -ENOMEM;
+ }
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev) {
+ dev_err(&client->dev, "failed to allocate input device\n");
+ return -ENOMEM;
}
keypad->client = client;
@@ -230,19 +235,20 @@ static int max7359_probe(struct i2c_client *client,
max7359_build_keycode(keypad, keymap_data);
- error = request_threaded_irq(client->irq, NULL, max7359_interrupt,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- client->name, keypad);
+ error = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ max7359_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, keypad);
if (error) {
dev_err(&client->dev, "failed to register interrupt\n");
- goto failed_free_mem;
+ return error;
}
/* Register the input device */
error = input_register_device(input_dev);
if (error) {
dev_err(&client->dev, "failed to register input device\n");
- goto failed_free_irq;
+ return error;
}
/* Initialize MAX7359 */
@@ -252,24 +258,6 @@ static int max7359_probe(struct i2c_client *client,
device_init_wakeup(&client->dev, 1);
return 0;
-
-failed_free_irq:
- free_irq(client->irq, keypad);
-failed_free_mem:
- input_free_device(input_dev);
- kfree(keypad);
- return error;
-}
-
-static int max7359_remove(struct i2c_client *client)
-{
- struct max7359_keypad *keypad = i2c_get_clientdata(client);
-
- free_irq(client->irq, keypad);
- input_unregister_device(keypad->input_dev);
- kfree(keypad);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -313,7 +301,6 @@ static struct i2c_driver max7359_i2c_driver = {
.pm = &max7359_pm,
},
.probe = max7359_probe,
- .remove = max7359_remove,
.id_table = max7359_ids,
};
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index 01f3b5b300f3..a3fe4a990cc9 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -392,7 +392,6 @@ static void keyspan_irq_recv(struct urb *urb)
default:
goto resubmit;
- break;
}
if (debug)
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 5a6334be30b8..e34dfc29beb3 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -83,6 +83,9 @@ soc_button_device_create(struct pnp_dev *pdev,
sizeof(*gpio_keys_pdata) +
sizeof(*gpio_keys) * MAX_NBUTTONS,
GFP_KERNEL);
+ if (!gpio_keys_pdata)
+ return ERR_PTR(-ENOMEM);
+
gpio_keys = (void *)(gpio_keys_pdata + 1);
for (info = button_info; info->name; info++) {
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 856936247500..421e29e4cd81 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -311,7 +311,14 @@ static int uinput_open(struct inode *inode, struct file *file)
static int uinput_validate_absbits(struct input_dev *dev)
{
unsigned int cnt;
- int retval = 0;
+ int nslot;
+
+ if (!test_bit(EV_ABS, dev->evbit))
+ return 0;
+
+ /*
+ * Check if absmin/absmax/absfuzz/absflat are sane.
+ */
for (cnt = 0; cnt < ABS_CNT; cnt++) {
int min, max;
@@ -327,8 +334,7 @@ static int uinput_validate_absbits(struct input_dev *dev)
UINPUT_NAME, cnt,
input_abs_get_min(dev, cnt),
input_abs_get_max(dev, cnt));
- retval = -EINVAL;
- break;
+ return -EINVAL;
}
if (input_abs_get_flat(dev, cnt) >
@@ -340,11 +346,18 @@ static int uinput_validate_absbits(struct input_dev *dev)
input_abs_get_flat(dev, cnt),
input_abs_get_min(dev, cnt),
input_abs_get_max(dev, cnt));
- retval = -EINVAL;
- break;
+ return -EINVAL;
}
}
- return retval;
+
+ if (test_bit(ABS_MT_SLOT, dev->absbit)) {
+ nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
+ input_mt_init_slots(dev, nslot, 0);
+ } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
+ input_set_events_per_packet(dev, 60);
+ }
+
+ return 0;
}
static int uinput_allocate_device(struct uinput_device *udev)
@@ -410,19 +423,9 @@ static int uinput_setup_device(struct uinput_device *udev,
input_abs_set_flat(dev, i, user_dev->absflat[i]);
}
- /* check if absmin/absmax/absfuzz/absflat are filled as
- * told in Documentation/input/input-programming.txt */
- if (test_bit(EV_ABS, dev->evbit)) {
- retval = uinput_validate_absbits(dev);
- if (retval < 0)
- goto exit;
- if (test_bit(ABS_MT_SLOT, dev->absbit)) {
- int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
- input_mt_init_slots(dev, nslot, 0);
- } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
- input_set_events_per_packet(dev, 60);
- }
- }
+ retval = uinput_validate_absbits(dev);
+ if (retval < 0)
+ goto exit;
udev->state = UIST_SETUP_COMPLETE;
retval = count;
@@ -720,6 +723,12 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
}
switch (cmd) {
+ case UI_GET_VERSION:
+ if (put_user(UINPUT_VERSION,
+ (unsigned int __user *)p))
+ retval = -EFAULT;
+ goto out;
+
case UI_DEV_CREATE:
retval = uinput_create_device(udev);
goto out;
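The new UI_GET_VERSION case above simply copies UINPUT_VERSION back to user space, so a client can probe which uinput interface revision it is talking to before deciding which setup path to use. A minimal, hypothetical user-space check (assuming uinput headers new enough to define UI_GET_VERSION) might look like this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/uinput.h>

int main(void)
{
	unsigned int version = 0;
	int fd = open("/dev/uinput", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return 1;
	if (ioctl(fd, UI_GET_VERSION, &version) == 0)
		printf("uinput interface version %u\n", version);
	close(fd);
	return 0;
}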
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index fb15c64ffb95..a59a1a64b674 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -99,6 +99,8 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
6-byte ALPS packet */
+#define ALPS_IS_RUSHMORE 0x100 /* device is a rushmore */
+#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
static const struct alps_model_info alps_model_data[] = {
{ { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
@@ -281,11 +283,10 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
*
* The bitmaps don't have enough data to track fingers, so this function
* only generates points representing a bounding box of at most two contacts.
- * These two points are returned in x1, y1, x2, and y2.
+ * These two points are returned in fields->mt.
*/
static void alps_process_bitmap_dolphin(struct alps_data *priv,
- struct alps_fields *fields,
- int *x1, int *y1, int *x2, int *y2)
+ struct alps_fields *fields)
{
int box_middle_x, box_middle_y;
unsigned int x_map, y_map;
@@ -308,8 +309,6 @@ static void alps_process_bitmap_dolphin(struct alps_data *priv,
if (x_msb > priv->x_bits || y_msb > priv->y_bits)
return;
- *x1 = *y1 = *x2 = *y2 = 0;
-
if (fields->fingers > 1) {
start_bit = priv->x_bits - x_msb;
end_bit = priv->x_bits - x_lsb;
@@ -320,10 +319,35 @@ static void alps_process_bitmap_dolphin(struct alps_data *priv,
end_bit = y_msb - 1;
box_middle_y = (priv->y_max * (start_bit + end_bit)) /
(2 * (priv->y_bits - 1));
- *x1 = fields->x;
- *y1 = fields->y;
- *x2 = 2 * box_middle_x - *x1;
- *y2 = 2 * box_middle_y - *y1;
+ fields->mt[0] = fields->st;
+ fields->mt[1].x = 2 * box_middle_x - fields->mt[0].x;
+ fields->mt[1].y = 2 * box_middle_y - fields->mt[0].y;
+ }
+}
+
+static void alps_get_bitmap_points(unsigned int map,
+ struct alps_bitmap_point *low,
+ struct alps_bitmap_point *high,
+ int *fingers)
+{
+ struct alps_bitmap_point *point;
+ int i, bit, prev_bit = 0;
+
+ point = low;
+ for (i = 0; map != 0; i++, map >>= 1) {
+ bit = map & 1;
+ if (bit) {
+ if (!prev_bit) {
+ point->start_bit = i;
+ point->num_bits = 0;
+ (*fingers)++;
+ }
+ point->num_bits++;
+ } else {
+ if (prev_bit)
+ point = high;
+ }
+ prev_bit = bit;
}
}
@@ -334,71 +358,21 @@ static void alps_process_bitmap_dolphin(struct alps_data *priv,
*
* The bitmaps don't have enough data to track fingers, so this function
* only generates points representing a bounding box of all contacts.
- * These points are returned in x1, y1, x2, and y2 when the return value
+ * These points are returned in fields->mt when the return value
* is greater than 0.
*/
static int alps_process_bitmap(struct alps_data *priv,
- unsigned int x_map, unsigned int y_map,
- int *x1, int *y1, int *x2, int *y2)
+ struct alps_fields *fields)
{
- struct alps_bitmap_point {
- int start_bit;
- int num_bits;
- };
-
- int fingers_x = 0, fingers_y = 0, fingers;
- int i, bit, prev_bit;
+ int i, fingers_x = 0, fingers_y = 0, fingers;
struct alps_bitmap_point x_low = {0,}, x_high = {0,};
struct alps_bitmap_point y_low = {0,}, y_high = {0,};
- struct alps_bitmap_point *point;
- if (!x_map || !y_map)
+ if (!fields->x_map || !fields->y_map)
return 0;
- *x1 = *y1 = *x2 = *y2 = 0;
-
- prev_bit = 0;
- point = &x_low;
- for (i = 0; x_map != 0; i++, x_map >>= 1) {
- bit = x_map & 1;
- if (bit) {
- if (!prev_bit) {
- point->start_bit = i;
- fingers_x++;
- }
- point->num_bits++;
- } else {
- if (prev_bit)
- point = &x_high;
- else
- point->num_bits = 0;
- }
- prev_bit = bit;
- }
-
- /*
- * y bitmap is reversed for what we need (lower positions are in
- * higher bits), so we process from the top end.
- */
- y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - priv->y_bits);
- prev_bit = 0;
- point = &y_low;
- for (i = 0; y_map != 0; i++, y_map <<= 1) {
- bit = y_map & (1 << (sizeof(y_map) * BITS_PER_BYTE - 1));
- if (bit) {
- if (!prev_bit) {
- point->start_bit = i;
- fingers_y++;
- }
- point->num_bits++;
- } else {
- if (prev_bit)
- point = &y_high;
- else
- point->num_bits = 0;
- }
- prev_bit = bit;
- }
+ alps_get_bitmap_points(fields->x_map, &x_low, &x_high, &fingers_x);
+ alps_get_bitmap_points(fields->y_map, &y_low, &y_high, &fingers_y);
/*
* Fingers can overlap, so we use the maximum count of fingers
@@ -407,58 +381,91 @@ static int alps_process_bitmap(struct alps_data *priv,
fingers = max(fingers_x, fingers_y);
/*
- * If total fingers is > 1 but either axis reports only a single
- * contact, we have overlapping or adjacent fingers. For the
- * purposes of creating a bounding box, divide the single contact
- * (roughly) equally between the two points.
+ * If an axis reports only a single contact, we have overlapping or
+ * adjacent fingers. Divide the single contact between the two points.
*/
- if (fingers > 1) {
- if (fingers_x == 1) {
- i = x_low.num_bits / 2;
- x_low.num_bits = x_low.num_bits - i;
- x_high.start_bit = x_low.start_bit + i;
- x_high.num_bits = max(i, 1);
- } else if (fingers_y == 1) {
- i = y_low.num_bits / 2;
- y_low.num_bits = y_low.num_bits - i;
- y_high.start_bit = y_low.start_bit + i;
- y_high.num_bits = max(i, 1);
- }
+ if (fingers_x == 1) {
+ i = (x_low.num_bits - 1) / 2;
+ x_low.num_bits = x_low.num_bits - i;
+ x_high.start_bit = x_low.start_bit + i;
+ x_high.num_bits = max(i, 1);
+ }
+ if (fingers_y == 1) {
+ i = (y_low.num_bits - 1) / 2;
+ y_low.num_bits = y_low.num_bits - i;
+ y_high.start_bit = y_low.start_bit + i;
+ y_high.num_bits = max(i, 1);
}
- *x1 = (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) /
- (2 * (priv->x_bits - 1));
- *y1 = (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) /
- (2 * (priv->y_bits - 1));
-
- if (fingers > 1) {
- *x2 = (priv->x_max *
- (2 * x_high.start_bit + x_high.num_bits - 1)) /
- (2 * (priv->x_bits - 1));
- *y2 = (priv->y_max *
- (2 * y_high.start_bit + y_high.num_bits - 1)) /
- (2 * (priv->y_bits - 1));
+ fields->mt[0].x =
+ (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) /
+ (2 * (priv->x_bits - 1));
+ fields->mt[0].y =
+ (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) /
+ (2 * (priv->y_bits - 1));
+
+ fields->mt[1].x =
+ (priv->x_max * (2 * x_high.start_bit + x_high.num_bits - 1)) /
+ (2 * (priv->x_bits - 1));
+ fields->mt[1].y =
+ (priv->y_max * (2 * y_high.start_bit + y_high.num_bits - 1)) /
+ (2 * (priv->y_bits - 1));
+
+ /* y-bitmap order is reversed, except on rushmore */
+ if (!(priv->flags & ALPS_IS_RUSHMORE)) {
+ fields->mt[0].y = priv->y_max - fields->mt[0].y;
+ fields->mt[1].y = priv->y_max - fields->mt[1].y;
}
return fingers;
}
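Continuing that example with a hypothetical priv->x_bits = 15 and priv->x_max = 2000: the low run { start_bit 1, num_bits 2 } gives mt[0].x = 2000 * (2 * 1 + 2 - 1) / (2 * 14) = 6000 / 28 = 214, i.e. the centre of the run scaled onto 0..x_max, and mt[1].x is derived the same way from the high run. On non-Rushmore pads the y coordinates are then mirrored as y_max - y, since those devices report the y bitmap bottom-up.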
-static void alps_set_slot(struct input_dev *dev, int slot, bool active,
- int x, int y)
+static void alps_set_slot(struct input_dev *dev, int slot, int x, int y)
{
input_mt_slot(dev, slot);
- input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
- if (active) {
- input_report_abs(dev, ABS_MT_POSITION_X, x);
- input_report_abs(dev, ABS_MT_POSITION_Y, y);
- }
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
+ input_report_abs(dev, ABS_MT_POSITION_X, x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, y);
}
-static void alps_report_semi_mt_data(struct input_dev *dev, int num_fingers,
- int x1, int y1, int x2, int y2)
+static void alps_report_mt_data(struct psmouse *psmouse, int n)
{
- alps_set_slot(dev, 0, num_fingers != 0, x1, y1);
- alps_set_slot(dev, 1, num_fingers == 2, x2, y2);
+ struct alps_data *priv = psmouse->private;
+ struct input_dev *dev = psmouse->dev;
+ struct alps_fields *f = &priv->f;
+ int i, slot[MAX_TOUCHES];
+
+ input_mt_assign_slots(dev, slot, f->mt, n);
+ for (i = 0; i < n; i++)
+ alps_set_slot(dev, slot[i], f->mt[i].x, f->mt[i].y);
+
+ input_mt_sync_frame(dev);
+}
+
+static void alps_report_semi_mt_data(struct psmouse *psmouse, int fingers)
+{
+ struct alps_data *priv = psmouse->private;
+ struct input_dev *dev = psmouse->dev;
+ struct alps_fields *f = &priv->f;
+
+ /* Use st data when we don't have mt data */
+ if (fingers < 2) {
+ f->mt[0].x = f->st.x;
+ f->mt[0].y = f->st.y;
+ fingers = f->pressure > 0 ? 1 : 0;
+ }
+
+ alps_report_mt_data(psmouse, (fingers <= 2) ? fingers : 2);
+
+ input_mt_report_finger_count(dev, fingers);
+
+ input_report_key(dev, BTN_LEFT, f->left);
+ input_report_key(dev, BTN_RIGHT, f->right);
+ input_report_key(dev, BTN_MIDDLE, f->middle);
+
+ input_report_abs(dev, ABS_PRESSURE, f->pressure);
+
+ input_sync(dev);
}
static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
@@ -532,7 +539,7 @@ static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p)
f->ts_middle = !!(p[3] & 0x40);
}
-static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p,
+static int alps_decode_pinnacle(struct alps_fields *f, unsigned char *p,
struct psmouse *psmouse)
{
f->first_mp = !!(p[4] & 0x40);
@@ -546,24 +553,31 @@ static void alps_decode_pinnacle(struct alps_fields *f, unsigned char *p,
((p[2] & 0x7f) << 1) |
(p[4] & 0x01);
- f->x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
+ f->st.x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
((p[0] & 0x30) >> 4);
- f->y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
- f->z = p[5] & 0x7f;
+ f->st.y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
+ f->pressure = p[5] & 0x7f;
alps_decode_buttons_v3(f, p);
+
+ return 0;
}
-static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p,
+static int alps_decode_rushmore(struct alps_fields *f, unsigned char *p,
struct psmouse *psmouse)
{
alps_decode_pinnacle(f, p, psmouse);
+ /* Rushmore's packet decoding differs slightly from Pinnacle's */
+ f->is_mp = !!(p[5] & 0x40);
+ f->fingers = max((p[5] & 0x3), ((p[5] >> 2) & 0x3)) + 1;
f->x_map |= (p[5] & 0x10) << 11;
f->y_map |= (p[5] & 0x20) << 6;
+
+ return 0;
}
-static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p,
+static int alps_decode_dolphin(struct alps_fields *f, unsigned char *p,
struct psmouse *psmouse)
{
u64 palm_data = 0;
@@ -573,9 +587,9 @@ static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p,
f->is_mp = !!(p[0] & 0x20);
if (!f->is_mp) {
- f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
- f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
- f->z = (p[0] & 4) ? 0 : p[5] & 0x7f;
+ f->st.x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
+ f->st.y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
+ f->pressure = (p[0] & 4) ? 0 : p[5] & 0x7f;
alps_decode_buttons_v3(f, p);
} else {
f->fingers = ((p[0] & 0x6) >> 1 |
@@ -596,19 +610,21 @@ static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p,
f->x_map = (palm_data >> priv->y_bits) &
(BIT(priv->x_bits) - 1);
}
+
+ return 0;
}
static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
- struct input_dev *dev = psmouse->dev;
struct input_dev *dev2 = priv->dev2;
- int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
- int fingers = 0, bmap_fn;
- struct alps_fields f = {0};
+ struct alps_fields *f = &priv->f;
+ int fingers = 0;
- priv->decode_fields(&f, packet, psmouse);
+ memset(f, 0, sizeof(*f));
+
+ priv->decode_fields(f, packet, psmouse);
/*
* There's no single feature of touchpad position and bitmap packets
@@ -623,22 +639,14 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
* packet. Check for this, and when it happens process the
* position packet as usual.
*/
- if (f.is_mp) {
- fingers = f.fingers;
+ if (f->is_mp) {
+ fingers = f->fingers;
if (priv->proto_version == ALPS_PROTO_V3) {
- bmap_fn = alps_process_bitmap(priv, f.x_map,
- f.y_map, &x1, &y1,
- &x2, &y2);
-
- /*
- * We shouldn't report more than one finger if
- * we don't have two coordinates.
- */
- if (fingers > 1 && bmap_fn < 2)
- fingers = bmap_fn;
+ if (alps_process_bitmap(priv, f) == 0)
+ fingers = 0; /* Use st data */
/* Now process position packet */
- priv->decode_fields(&f, priv->multi_data,
+ priv->decode_fields(f, priv->multi_data,
psmouse);
} else {
/*
@@ -647,15 +655,14 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
* calculate Pt2, so we need to do position
* packet decode first.
*/
- priv->decode_fields(&f, priv->multi_data,
+ priv->decode_fields(f, priv->multi_data,
psmouse);
/*
* Since Dolphin's finger number is reliable,
* there is no need to compare with bmap_fn.
*/
- alps_process_bitmap_dolphin(priv, &f, &x1, &y1,
- &x2, &y2);
+ alps_process_bitmap_dolphin(priv, f);
}
} else {
priv->multi_packet = 0;
@@ -670,10 +677,10 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
* out misidentified bitmap packets, we reject anything with this
* bit set.
*/
- if (f.is_mp)
+ if (f->is_mp)
return;
- if (!priv->multi_packet && f.first_mp) {
+ if (!priv->multi_packet && f->first_mp) {
priv->multi_packet = 1;
memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
return;
@@ -687,44 +694,15 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
* with x, y, and z all zero, so these seem to be flukes.
* Ignore them.
*/
- if (f.x && f.y && !f.z)
+ if (f->st.x && f->st.y && !f->pressure)
return;
- /*
- * If we don't have MT data or the bitmaps were empty, we have
- * to rely on ST data.
- */
- if (!fingers) {
- x1 = f.x;
- y1 = f.y;
- fingers = f.z > 0 ? 1 : 0;
- }
-
- if (f.z >= 64)
- input_report_key(dev, BTN_TOUCH, 1);
- else
- input_report_key(dev, BTN_TOUCH, 0);
-
- alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
-
- input_mt_report_finger_count(dev, fingers);
-
- input_report_key(dev, BTN_LEFT, f.left);
- input_report_key(dev, BTN_RIGHT, f.right);
- input_report_key(dev, BTN_MIDDLE, f.middle);
-
- if (f.z > 0) {
- input_report_abs(dev, ABS_X, f.x);
- input_report_abs(dev, ABS_Y, f.y);
- }
- input_report_abs(dev, ABS_PRESSURE, f.z);
-
- input_sync(dev);
+ alps_report_semi_mt_data(psmouse, fingers);
if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
- input_report_key(dev2, BTN_LEFT, f.ts_left);
- input_report_key(dev2, BTN_RIGHT, f.ts_right);
- input_report_key(dev2, BTN_MIDDLE, f.ts_middle);
+ input_report_key(dev2, BTN_LEFT, f->ts_left);
+ input_report_key(dev2, BTN_RIGHT, f->ts_right);
+ input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
input_sync(dev2);
}
}
@@ -823,13 +801,8 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
unsigned char *packet = psmouse->packet;
- struct input_dev *dev = psmouse->dev;
+ struct alps_fields *f = &priv->f;
int offset;
- int x, y, z;
- int left, right;
- int x1, y1, x2, y2;
- int fingers = 0;
- unsigned int x_bitmap, y_bitmap;
/*
* v4 has a 6-byte encoding for bitmap data, but this data is
@@ -851,71 +824,207 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
if (++priv->multi_packet > 2) {
priv->multi_packet = 0;
- x_bitmap = ((priv->multi_data[2] & 0x1f) << 10) |
+ f->x_map = ((priv->multi_data[2] & 0x1f) << 10) |
((priv->multi_data[3] & 0x60) << 3) |
((priv->multi_data[0] & 0x3f) << 2) |
((priv->multi_data[1] & 0x60) >> 5);
- y_bitmap = ((priv->multi_data[5] & 0x01) << 10) |
+ f->y_map = ((priv->multi_data[5] & 0x01) << 10) |
((priv->multi_data[3] & 0x1f) << 5) |
(priv->multi_data[1] & 0x1f);
- fingers = alps_process_bitmap(priv, x_bitmap, y_bitmap,
- &x1, &y1, &x2, &y2);
-
- /* Store MT data.*/
- priv->fingers = fingers;
- priv->x1 = x1;
- priv->x2 = x2;
- priv->y1 = y1;
- priv->y2 = y2;
+ f->fingers = alps_process_bitmap(priv, f);
}
- left = packet[4] & 0x01;
- right = packet[4] & 0x02;
+ f->left = packet[4] & 0x01;
+ f->right = packet[4] & 0x02;
- x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
- ((packet[0] & 0x30) >> 4);
- y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
- z = packet[5] & 0x7f;
+ f->st.x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
+ ((packet[0] & 0x30) >> 4);
+ f->st.y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
+ f->pressure = packet[5] & 0x7f;
- /*
- * If there were no contacts in the bitmap, use ST
- * points in MT reports.
- * If there were two contacts or more, report MT data.
- */
- if (priv->fingers < 2) {
- x1 = x;
- y1 = y;
- fingers = z > 0 ? 1 : 0;
- } else {
- fingers = priv->fingers;
- x1 = priv->x1;
- x2 = priv->x2;
- y1 = priv->y1;
- y2 = priv->y2;
+ alps_report_semi_mt_data(psmouse, f->fingers);
+}
+
+static bool alps_is_valid_package_v7(struct psmouse *psmouse)
+{
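+ /* Bytes 3, 4 and 6 carry fixed marker bits; check them to reject bad data */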
+ switch (psmouse->pktcnt) {
+ case 3:
+ return (psmouse->packet[2] & 0x40) == 0x40;
+ case 4:
+ return (psmouse->packet[3] & 0x48) == 0x48;
+ case 6:
+ return (psmouse->packet[5] & 0x40) == 0x00;
}
+ return true;
+}
- if (z >= 64)
- input_report_key(dev, BTN_TOUCH, 1);
+static unsigned char alps_get_packet_id_v7(char *byte)
+{
+ unsigned char packet_id;
+
+ if (byte[4] & 0x40)
+ packet_id = V7_PACKET_ID_TWO;
+ else if (byte[4] & 0x01)
+ packet_id = V7_PACKET_ID_MULTI;
+ else if ((byte[0] & 0x10) && !(byte[4] & 0x43))
+ packet_id = V7_PACKET_ID_NEW;
+ else if (byte[1] == 0x00 && byte[4] == 0x00)
+ packet_id = V7_PACKET_ID_IDLE;
else
- input_report_key(dev, BTN_TOUCH, 0);
+ packet_id = V7_PACKET_ID_UNKNOWN;
- alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
+ return packet_id;
+}
- input_mt_report_finger_count(dev, fingers);
+static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
+ unsigned char *pkt,
+ unsigned char pkt_id)
+{
+ mt[0].x = ((pkt[2] & 0x80) << 4);
+ mt[0].x |= ((pkt[2] & 0x3F) << 5);
+ mt[0].x |= ((pkt[3] & 0x30) >> 1);
+ mt[0].x |= (pkt[3] & 0x07);
+ mt[0].y = (pkt[1] << 3) | (pkt[0] & 0x07);
+
+ mt[1].x = ((pkt[3] & 0x80) << 4);
+ mt[1].x |= ((pkt[4] & 0x80) << 3);
+ mt[1].x |= ((pkt[4] & 0x3F) << 4);
+ mt[1].y = ((pkt[5] & 0x80) << 3);
+ mt[1].y |= ((pkt[5] & 0x3F) << 4);
+
+ switch (pkt_id) {
+ case V7_PACKET_ID_TWO:
+ mt[1].x &= ~0x000F;
+ mt[1].y |= 0x000F;
+ break;
- input_report_key(dev, BTN_LEFT, left);
- input_report_key(dev, BTN_RIGHT, right);
+ case V7_PACKET_ID_MULTI:
+ mt[1].x &= ~0x003F;
+ mt[1].y &= ~0x0020;
+ mt[1].y |= ((pkt[4] & 0x02) << 4);
+ mt[1].y |= 0x001F;
+ break;
- if (z > 0) {
- input_report_abs(dev, ABS_X, x);
- input_report_abs(dev, ABS_Y, y);
+ case V7_PACKET_ID_NEW:
+ mt[1].x &= ~0x003F;
+ mt[1].x |= (pkt[0] & 0x20);
+ mt[1].y |= 0x000F;
+ break;
}
- input_report_abs(dev, ABS_PRESSURE, z);
+
+ mt[0].y = 0x7FF - mt[0].y;
+ mt[1].y = 0x7FF - mt[1].y;
+}
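
As an aside, a minimal user-space sketch (not part of the patch; the packet bytes are made up) showing how the first slot's coordinates fall out of the bit packing above:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical V7 position packet, illustration only */
		unsigned char pkt[6] = { 0x07, 0xff, 0xbf, 0x37, 0x00, 0x00 };
		int x, y;

		x = ((pkt[2] & 0x80) << 4) | ((pkt[2] & 0x3F) << 5) |
		    ((pkt[3] & 0x30) >> 1) | (pkt[3] & 0x07);
		y = (pkt[1] << 3) | (pkt[0] & 0x07);
		y = 0x7FF - y;				/* the y axis is inverted */

		printf("slot 0: x=%#x y=%#x\n", x, y);	/* x=0xfff y=0 */
		return 0;
	}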
+
+static int alps_get_mt_count(struct input_mt_pos *mt)
+{
+ int i;
+
+ for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++)
+ /* empty */;
+
+ return i;
+}
+
+static int alps_decode_packet_v7(struct alps_fields *f,
+ unsigned char *p,
+ struct psmouse *psmouse)
+{
+ unsigned char pkt_id;
+
+ pkt_id = alps_get_packet_id_v7(p);
+ if (pkt_id == V7_PACKET_ID_IDLE)
+ return 0;
+ if (pkt_id == V7_PACKET_ID_UNKNOWN)
+ return -1;
+
+ alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
+
+ if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) {
+ f->left = (p[0] & 0x80) >> 7;
+ f->right = (p[0] & 0x20) >> 5;
+ f->middle = (p[0] & 0x10) >> 4;
+ }
+
+ if (pkt_id == V7_PACKET_ID_TWO)
+ f->fingers = alps_get_mt_count(f->mt);
+ else if (pkt_id == V7_PACKET_ID_MULTI)
+ f->fingers = 3 + (p[5] & 0x03);
+
+ return 0;
+}
+
+static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev2 = priv->dev2;
+ int x, y, z, left, right, middle;
+
+ /*
+ * b7 b6 b5 b4 b3 b2 b1 b0
+ * Byte0 0 1 0 0 1 0 0 0
+ * Byte1 1 1 * * 1 M R L
+ * Byte2 X7 1 X5 X4 X3 X2 X1 X0
+ * Byte3 Z6 1 Y6 X6 1 Y2 Y1 Y0
+ * Byte4 Y7 0 Y5 Y4 Y3 1 1 0
+ * Byte5 T&P 0 Z5 Z4 Z3 Z2 Z1 Z0
+ * M / R / L: Middle / Right / Left button
+ */
+
+ x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2);
+ y = (packet[3] & 0x07) | (packet[4] & 0xb8) |
+ ((packet[3] & 0x20) << 1);
+ z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);
+
+ left = (packet[1] & 0x01);
+ right = (packet[1] & 0x02) >> 1;
+ middle = (packet[1] & 0x04) >> 2;
+
+ /* Divide by 2 since the trackpoint's speed is too fast */
+ input_report_rel(dev2, REL_X, (char)x / 2);
+ input_report_rel(dev2, REL_Y, -((char)y / 2));
+
+ input_report_key(dev2, BTN_LEFT, left);
+ input_report_key(dev2, BTN_RIGHT, right);
+ input_report_key(dev2, BTN_MIDDLE, middle);
+
+ input_sync(dev2);
+}
+
+static void alps_process_touchpad_packet_v7(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ struct input_dev *dev = psmouse->dev;
+ struct alps_fields *f = &priv->f;
+
+ memset(f, 0, sizeof(*f));
+
+ if (priv->decode_fields(f, psmouse->packet, psmouse))
+ return;
+
+ alps_report_mt_data(psmouse, alps_get_mt_count(f->mt));
+
+ input_mt_report_finger_count(dev, f->fingers);
+
+ input_report_key(dev, BTN_LEFT, f->left);
+ input_report_key(dev, BTN_RIGHT, f->right);
+ input_report_key(dev, BTN_MIDDLE, f->middle);
input_sync(dev);
}
+static void alps_process_packet_v7(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+
+ if (packet[0] == 0x48 && (packet[4] & 0x47) == 0x06)
+ alps_process_trackstick_packet_v7(psmouse);
+ else
+ alps_process_touchpad_packet_v7(psmouse);
+}
+
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
unsigned char packet[],
bool report_buttons)
@@ -1080,6 +1189,14 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
}
+ if (priv->proto_version == ALPS_PROTO_V7 &&
+ !alps_is_valid_package_v7(psmouse)) {
+ psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
+ psmouse->pktcnt - 1,
+ psmouse->packet[psmouse->pktcnt - 1]);
+ return PSMOUSE_BAD_DATA;
+ }
+
if (psmouse->pktcnt == psmouse->pktsize) {
priv->process_packet(psmouse);
return PSMOUSE_FULL_PACKET;
@@ -1192,6 +1309,22 @@ static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
return 0;
}
+static bool alps_check_valid_firmware_id(unsigned char id[])
+{
+ if (id[0] == 0x73)
+ return true;
+
+ if (id[0] == 0x88 &&
+ (id[1] == 0x07 ||
+ id[1] == 0x08 ||
+ (id[1] & 0xf0) == 0xb0 ||
+ (id[1] & 0xf0) == 0xc0)) {
+ return true;
+ }
+
+ return false;
+}
+
static int alps_enter_command_mode(struct psmouse *psmouse)
{
unsigned char param[4];
@@ -1201,8 +1334,7 @@ static int alps_enter_command_mode(struct psmouse *psmouse)
return -1;
}
- if ((param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) &&
- param[0] != 0x73) {
+ if (!alps_check_valid_firmware_id(param)) {
psmouse_dbg(psmouse,
"unknown response while entering command mode\n");
return -1;
@@ -1660,6 +1792,45 @@ error:
return -1;
}
+static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
+{
+ int reg, x_pitch, y_pitch, x_electrode, y_electrode, x_phys, y_phys;
+ struct alps_data *priv = psmouse->private;
+
+ reg = alps_command_mode_read_reg(psmouse, reg_pitch);
+ if (reg < 0)
+ return reg;
+
+ x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */
+
+ y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */
+ y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */
+
+ reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
+ if (reg < 0)
+ return reg;
+
+ x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */
+ x_electrode = 17 + x_electrode;
+
+ y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */
+ y_electrode = 13 + y_electrode;
+
+ x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */
+ y_phys = y_pitch * (y_electrode - 1); /* In 0.1 mm units */
+
+ priv->x_res = priv->x_max * 10 / x_phys; /* units / mm */
+ priv->y_res = priv->y_max * 10 / y_phys; /* units / mm */
+
+ psmouse_dbg(psmouse,
+ "pitch %dx%d num-electrodes %dx%d physical size %dx%d mm res %dx%d\n",
+ x_pitch, y_pitch, x_electrode, y_electrode,
+ x_phys / 10, y_phys / 10, priv->x_res, priv->y_res);
+
+ return 0;
+}
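
For reference, a worked example of the pitch arithmetic above as a stand-alone sketch (the register value is hypothetical; like the driver, it assumes char is signed):

	#include <stdio.h>

	int main(void)
	{
		int reg = 0x23;		/* hypothetical: y nibble = 2, x nibble = 3 */
		int x_pitch = 50 + 2 * ((char)(reg << 4) >> 4);	/* 50 + 2*3 = 56 (0.1 mm) */
		int y_pitch = 36 + 2 * ((char)reg >> 4);	/* 36 + 2*2 = 40 (0.1 mm) */

		printf("pitch %d.%d x %d.%d mm\n",
		       x_pitch / 10, x_pitch % 10, y_pitch / 10, y_pitch % 10);
		return 0;
	}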
+
static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
@@ -1680,6 +1851,9 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00))
goto error;
+ if (alps_get_v3_v7_resolution(psmouse, 0xc2da))
+ goto error;
+
reg_val = alps_command_mode_read_reg(psmouse, 0xc2c6);
if (reg_val == -1)
goto error;
@@ -1856,6 +2030,35 @@ static int alps_hw_init_dolphin_v1(struct psmouse *psmouse)
return 0;
}
+static int alps_hw_init_v7(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int reg_val, ret = -1;
+
+ if (alps_enter_command_mode(psmouse) ||
+ alps_command_mode_read_reg(psmouse, 0xc2d9) == -1)
+ goto error;
+
+ if (alps_get_v3_v7_resolution(psmouse, 0xc397))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0xc2c9, 0x64))
+ goto error;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0xc2c4);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x02))
+ goto error;
+
+ alps_exit_command_mode(psmouse);
+ return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE);
+
+error:
+ alps_exit_command_mode(psmouse);
+ return ret;
+}
+
static void alps_set_defaults(struct alps_data *priv)
{
priv->byte0 = 0x8f;
@@ -1914,6 +2117,21 @@ static void alps_set_defaults(struct alps_data *priv)
priv->x_max = 2047;
priv->y_max = 1535;
break;
+ case ALPS_PROTO_V7:
+ priv->hw_init = alps_hw_init_v7;
+ priv->process_packet = alps_process_packet_v7;
+ priv->decode_fields = alps_decode_packet_v7;
+ priv->set_abs_params = alps_set_abs_params_mt;
+ priv->nibble_commands = alps_v3_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+ priv->x_max = 0xfff;
+ priv->y_max = 0x7ff;
+ priv->byte0 = 0x48;
+ priv->mask0 = 0x48;
+
+ if (priv->fw_ver[1] != 0xba)
+ priv->flags |= ALPS_BUTTONPAD;
+ break;
}
}
@@ -1972,6 +2190,9 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
alps_exit_command_mode(psmouse))
return -EIO;
+ /* Save the Firmware version */
+ memcpy(priv->fw_ver, ec, 3);
+
if (alps_match_table(psmouse, priv, e7, ec) == 0) {
return 0;
} else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
@@ -1982,6 +2203,12 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
return -EIO;
else
return 0;
+ } else if (ec[0] == 0x88 &&
+ ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
+ priv->proto_version = ALPS_PROTO_V7;
+ alps_set_defaults(priv);
+
+ return 0;
} else if (ec[0] == 0x88 && ec[1] == 0x08) {
priv->proto_version = ALPS_PROTO_V3;
alps_set_defaults(priv);
@@ -1990,6 +2217,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
priv->decode_fields = alps_decode_rushmore;
priv->x_bits = 16;
priv->y_bits = 12;
+ priv->flags |= ALPS_IS_RUSHMORE;
/* hack to make addr_command, nibble_command available */
psmouse->private = priv;
@@ -2044,17 +2272,21 @@ static void alps_set_abs_params_st(struct alps_data *priv,
static void alps_set_abs_params_mt(struct alps_data *priv,
struct input_dev *dev1)
{
- set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
- input_mt_init_slots(dev1, 2, 0);
input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, priv->x_max, 0, 0);
input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, priv->y_max, 0, 0);
- set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
+ input_abs_set_res(dev1, ABS_MT_POSITION_X, priv->x_res);
+ input_abs_set_res(dev1, ABS_MT_POSITION_Y, priv->y_res);
+
+ input_mt_init_slots(dev1, MAX_TOUCHES, INPUT_MT_POINTER |
+ INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK | INPUT_MT_SEMI_MT);
+
set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
- input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0);
- input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0);
+ /* V7 is real multi-touch */
+ if (priv->proto_version == ALPS_PROTO_V7)
+ clear_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
}
int alps_init(struct psmouse *psmouse)
@@ -2100,7 +2332,9 @@ int alps_init(struct psmouse *psmouse)
dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
priv->set_abs_params(priv, dev1);
- input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
+ /* No pressure on V7 */
+ if (priv->proto_version != ALPS_PROTO_V7)
+ input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
if (priv->flags & ALPS_WHEEL) {
dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL);
@@ -2117,6 +2351,9 @@ int alps_init(struct psmouse *psmouse)
dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1);
dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2);
dev1->keybit[BIT_WORD(BTN_3)] |= BIT_MASK(BTN_3);
+ } else if (priv->flags & ALPS_BUTTONPAD) {
+ set_bit(INPUT_PROP_BUTTONPAD, dev1->propbit);
+ clear_bit(BTN_RIGHT, dev1->keybit);
} else {
dev1->keybit[BIT_WORD(BTN_MIDDLE)] |= BIT_MASK(BTN_MIDDLE);
}
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 03f88b6940c7..66240b47819a 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -12,17 +12,39 @@
#ifndef _ALPS_H
#define _ALPS_H
+#include <linux/input/mt.h>
+
#define ALPS_PROTO_V1 1
#define ALPS_PROTO_V2 2
#define ALPS_PROTO_V3 3
#define ALPS_PROTO_V4 4
#define ALPS_PROTO_V5 5
#define ALPS_PROTO_V6 6
+#define ALPS_PROTO_V7 7 /* t3btl t4s */
+
+#define MAX_TOUCHES 2
#define DOLPHIN_COUNT_PER_ELECTRODE 64
#define DOLPHIN_PROFILE_XOFFSET 8 /* x-electrode offset */
#define DOLPHIN_PROFILE_YOFFSET 1 /* y-electrode offset */
+/*
+ * enum V7_PACKET_ID - defines the packet type for V7
+ * V7_PACKET_ID_IDLE: There's no finger and no button activity.
+ * V7_PACKET_ID_TWO: There are one or two non-resting fingers on the touchpad
+ * or there is button activity.
+ * V7_PACKET_ID_MULTI: There are at least three non-resting fingers.
+ * V7_PACKET_ID_NEW: The finger position in the slot is not continuous from
+ * the previous packet.
+ */
+enum V7_PACKET_ID {
+ V7_PACKET_ID_IDLE,
+ V7_PACKET_ID_TWO,
+ V7_PACKET_ID_MULTI,
+ V7_PACKET_ID_NEW,
+ V7_PACKET_ID_UNKNOWN,
+};
+
/**
* struct alps_model_info - touchpad ID table
* @signature: E7 response string to match.
@@ -46,7 +68,7 @@ struct alps_model_info {
unsigned char command_mode_resp;
unsigned char proto_version;
unsigned char byte0, mask0;
- unsigned char flags;
+ int flags;
};
/**
@@ -65,14 +87,19 @@ struct alps_nibble_commands {
unsigned char data;
};
+struct alps_bitmap_point {
+ int start_bit;
+ int num_bits;
+};
+
/**
* struct alps_fields - decoded version of the report packet
* @x_map: Bitmap of active X positions for MT.
* @y_map: Bitmap of active Y positions for MT.
* @fingers: Number of fingers for MT.
- * @x: X position for ST.
- * @y: Y position for ST.
- * @z: Z position for ST.
+ * @pressure: Pressure.
+ * @st: Position for ST.
+ * @mt: Positions for MT.
* @first_mp: Packet is the first of a multi-packet report.
* @is_mp: Packet is part of a multi-packet report.
* @left: Left touchpad button is active.
@@ -86,9 +113,11 @@ struct alps_fields {
unsigned int x_map;
unsigned int y_map;
unsigned int fingers;
- unsigned int x;
- unsigned int y;
- unsigned int z;
+
+ int pressure;
+ struct input_mt_pos st;
+ struct input_mt_pos mt[MAX_TOUCHES];
+
unsigned int first_mp:1;
unsigned int is_mp:1;
@@ -113,6 +142,7 @@ struct alps_fields {
* known format for this model. The first byte of the report, ANDed with
* mask0, should match byte0.
* @mask0: The mask used to check the first byte of the report.
+ * @fw_ver: Cached copy of the firmware version (EC report).
* @flags: Additional device capabilities (passthrough port, trackstick, etc.).
* @x_max: Largest possible X position value.
* @y_max: Largest possible Y position value.
@@ -125,11 +155,7 @@ struct alps_fields {
* @prev_fin: Finger bit from previous packet.
* @multi_packet: Multi-packet data in progress.
* @multi_data: Saved multi-packet data.
- * @x1: First X coordinate from last MT report.
- * @x2: Second X coordinate from last MT report.
- * @y1: First Y coordinate from last MT report.
- * @y2: Second Y coordinate from last MT report.
- * @fingers: Number of fingers from last MT report.
+ * @f: Decoded packet data fields.
* @quirks: Bitmap of ALPS_QUIRK_*.
* @timer: Timer for flushing out the final report packet in the stream.
*/
@@ -142,23 +168,25 @@ struct alps_data {
int addr_command;
unsigned char proto_version;
unsigned char byte0, mask0;
- unsigned char flags;
+ unsigned char fw_ver[3];
+ int flags;
int x_max;
int y_max;
int x_bits;
int y_bits;
+ unsigned int x_res;
+ unsigned int y_res;
int (*hw_init)(struct psmouse *psmouse);
void (*process_packet)(struct psmouse *psmouse);
- void (*decode_fields)(struct alps_fields *f, unsigned char *p,
+ int (*decode_fields)(struct alps_fields *f, unsigned char *p,
struct psmouse *psmouse);
void (*set_abs_params)(struct alps_data *priv, struct input_dev *dev1);
int prev_fin;
int multi_packet;
unsigned char multi_data[6];
- int x1, x2, y1, y2;
- int fingers;
+ struct alps_fields f;
u8 quirks;
struct timer_list timer;
};
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 613261994621..e74e5d6e5f9f 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -170,6 +170,15 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
serio_interrupt(kbd_dev->hv_serio, scan_code, 0);
}
spin_unlock_irqrestore(&kbd_dev->lock, flags);
+
+ /*
+ * Only trigger a wakeup on key down, otherwise
+ * "echo freeze > /sys/power/state" can't really enter the
+ * state because the key-up event for the Enter key would
+ * trigger a wakeup right away.
+ */
+ if (!(info & IS_BREAK))
+ pm_wakeup_event(&hv_dev->device, 0);
+
break;
default:
@@ -376,6 +385,9 @@ static int hv_kbd_probe(struct hv_device *hv_dev,
goto err_close_vmbus;
serio_register_port(kbd_dev->hv_serio);
+
+ device_init_wakeup(&hv_dev->device, true);
+
return 0;
err_close_vmbus:
@@ -390,6 +402,7 @@ static int hv_kbd_remove(struct hv_device *hv_dev)
{
struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
+ device_init_wakeup(&hv_dev->device, false);
serio_unregister_port(kbd_dev->hv_serio);
vmbus_close(hv_dev->channel);
kfree(kbd_dev);
diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig
index bed7cbf84cfd..623bb9e0d5a4 100644
--- a/drivers/input/tablet/Kconfig
+++ b/drivers/input/tablet/Kconfig
@@ -73,20 +73,14 @@ config TABLET_USB_KBTAB
To compile this driver as a module, choose M here: the
module will be called kbtab.
-config TABLET_USB_WACOM
- tristate "Wacom Intuos/Graphire tablet support (USB)"
- depends on USB_ARCH_HAS_HCD
- select POWER_SUPPLY
- select USB
- select NEW_LEDS
- select LEDS_CLASS
+config TABLET_SERIAL_WACOM4
+ tristate "Wacom protocol 4 serial tablet support"
+ select SERIO
help
- Say Y here if you want to use the USB version of the Wacom Intuos
- or Graphire tablet. Make sure to say Y to "Mouse support"
- (CONFIG_INPUT_MOUSEDEV) and/or "Event interface support"
- (CONFIG_INPUT_EVDEV) as well.
+ Say Y here if you want to use Wacom protocol 4 serial tablets.
+ E.g. serial versions of the Cintiq, Graphire or Penpartner.
To compile this driver as a module, choose M here: the
- module will be called wacom.
+ module will be called wacom_serial4.
endif
diff --git a/drivers/input/tablet/Makefile b/drivers/input/tablet/Makefile
index 3f6c25220638..2e130101cf3c 100644
--- a/drivers/input/tablet/Makefile
+++ b/drivers/input/tablet/Makefile
@@ -2,12 +2,10 @@
# Makefile for the tablet drivers
#
-# Multipart objects.
-wacom-objs := wacom_wac.o wacom_sys.o
obj-$(CONFIG_TABLET_USB_ACECAD) += acecad.o
obj-$(CONFIG_TABLET_USB_AIPTEK) += aiptek.o
obj-$(CONFIG_TABLET_USB_GTCO) += gtco.o
obj-$(CONFIG_TABLET_USB_HANWANG) += hanwang.o
obj-$(CONFIG_TABLET_USB_KBTAB) += kbtab.o
-obj-$(CONFIG_TABLET_USB_WACOM) += wacom.o
+obj-$(CONFIG_TABLET_SERIAL_WACOM4) += wacom_serial4.o
diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
new file mode 100644
index 000000000000..20ab802461e7
--- /dev/null
+++ b/drivers/input/tablet/wacom_serial4.c
@@ -0,0 +1,620 @@
+/*
+ * Wacom protocol 4 serial tablet driver
+ *
+ * Copyright 2014 Hans de Goede <hdegoede@redhat.com>
+ * Copyright 2011-2012 Julian Squires <julian@cipht.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version. See the file COPYING in the main directory of
+ * this archive for more details.
+ *
+ * Many thanks to Bill Seremetis, without whom PenPartner support
+ * would not have been possible. Thanks to Patrick Mahoney.
+ *
+ * This driver was developed with reference to much code written by others,
+ * particularly:
+ * - elo, gunze drivers by Vojtech Pavlik <vojtech@ucw.cz>;
+ * - wacom_w8001 driver by Jaya Kumar <jayakumar.lkml@gmail.com>;
+ * - the USB wacom input driver, credited to many people
+ * (see drivers/input/tablet/wacom.h);
+ * - new and old versions of linuxwacom / xf86-input-wacom credited to
+ * Frederic Lepied, France. <Lepied@XFree86.org> and
+ * Ping Cheng, Wacom. <pingc@wacom.com>;
+ * - and xf86wacom.c (a presumably ancient version of the linuxwacom code),
+ * by Frederic Lepied and Raph Levien <raph@gtk.org>.
+ *
+ * To do:
+ * - support pad buttons; (requires access to a model with pad buttons)
+ * - support (protocol 4-style) tilt (requires access to a > 1.4 rom model)
+ */
+
+/*
+ * Wacom serial protocol 4 documentation taken from the linuxwacom-0.9.9 code;
+ * protocol 4 uses 7 or 9 bytes of data in the following format:
+ *
+ * Byte 1
+ * bit 7 Sync bit always 1
+ * bit 6 Pointing device detected
+ * bit 5 Cursor = 0 / Stylus = 1
+ * bit 4 Reserved
+ * bit 3 1 if a button on the pointing device has been pressed
+ * bit 2 P0 (optional)
+ * bit 1 X15
+ * bit 0 X14
+ *
+ * Byte 2
+ * bit 7 Always 0
+ * bits 6-0 = X13 - X7
+ *
+ * Byte 3
+ * bit 7 Always 0
+ * bits 6-0 = X6 - X0
+ *
+ * Byte 4
+ * bit 7 Always 0
+ * bit 6 B3
+ * bit 5 B2
+ * bit 4 B1
+ * bit 3 B0
+ * bit 2 P1 (optional)
+ * bit 1 Y15
+ * bit 0 Y14
+ *
+ * Byte 5
+ * bit 7 Always 0
+ * bits 6-0 = Y13 - Y7
+ *
+ * Byte 6
+ * bit 7 Always 0
+ * bits 6-0 = Y6 - Y0
+ *
+ * Byte 7
+ * bit 7 Always 0
+ * bit 6 Sign of pressure data; or wheel-rel for cursor tool
+ * bit 5 P7; or REL1 for cursor tool
+ * bit 4 P6; or REL0 for cursor tool
+ * bit 3 P5
+ * bit 2 P4
+ * bit 1 P3
+ * bit 0 P2
+ *
+ * Bytes 8 and 9 are optional and present only
+ * in tilt mode.
+ *
+ * Byte 8
+ * bit 7 Always 0
+ * bit 6 Sign of tilt X
+ * bit 5 Xt6
+ * bit 4 Xt5
+ * bit 3 Xt4
+ * bit 2 Xt3
+ * bit 1 Xt2
+ * bit 0 Xt1
+ *
+ * Byte 9
+ * bit 7 Always 0
+ * bit 6 Sign of tilt Y
+ * bit 5 Yt6
+ * bit 4 Yt5
+ * bit 3 Yt4
+ * bit 2 Yt3
+ * bit 1 Yt2
+ * bit 0 Yt1
+ */
+
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/serio.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+MODULE_AUTHOR("Julian Squires <julian@cipht.net>, Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION("Wacom protocol 4 serial tablet driver");
+MODULE_LICENSE("GPL");
+
+#define REQUEST_MODEL_AND_ROM_VERSION "~#"
+#define REQUEST_MAX_COORDINATES "~C\r"
+#define REQUEST_CONFIGURATION_STRING "~R\r"
+#define REQUEST_RESET_TO_PROTOCOL_IV "\r#"
+/*
+ * Note: sending "\r$\r" causes at least the Digitizer II to send
+ * packets in ASCII instead of binary. "\r#" seems to undo that.
+ */
+
+#define COMMAND_START_SENDING_PACKETS "ST\r"
+#define COMMAND_STOP_SENDING_PACKETS "SP\r"
+#define COMMAND_MULTI_MODE_INPUT "MU1\r"
+#define COMMAND_ORIGIN_IN_UPPER_LEFT "OC1\r"
+#define COMMAND_ENABLE_ALL_MACRO_BUTTONS "~M0\r"
+#define COMMAND_DISABLE_GROUP_1_MACRO_BUTTONS "~M1\r"
+#define COMMAND_TRANSMIT_AT_MAX_RATE "IT0\r"
+#define COMMAND_DISABLE_INCREMENTAL_MODE "IN0\r"
+#define COMMAND_ENABLE_CONTINUOUS_MODE "SR\r"
+#define COMMAND_ENABLE_PRESSURE_MODE "PH1\r"
+#define COMMAND_Z_FILTER "ZF1\r"
+
+/* Note that this is a protocol 4 packet without tilt information. */
+#define PACKET_LENGTH 7
+#define DATA_SIZE 32
+
+/* flags */
+#define F_COVERS_SCREEN 0x01
+#define F_HAS_STYLUS2 0x02
+#define F_HAS_SCROLLWHEEL 0x04
+
+/* device IDs */
+#define STYLUS_DEVICE_ID 0x02
+#define CURSOR_DEVICE_ID 0x06
+#define ERASER_DEVICE_ID 0x0A
+
+enum { STYLUS = 1, ERASER, CURSOR };
+
+static const struct {
+ int device_id;
+ int input_id;
+} tools[] = {
+ { 0, 0 },
+ { STYLUS_DEVICE_ID, BTN_TOOL_PEN },
+ { ERASER_DEVICE_ID, BTN_TOOL_RUBBER },
+ { CURSOR_DEVICE_ID, BTN_TOOL_MOUSE },
+};
+
+struct wacom {
+ struct input_dev *dev;
+ struct completion cmd_done;
+ int result;
+ u8 expect;
+ u8 eraser_mask;
+ unsigned int extra_z_bits;
+ unsigned int flags;
+ unsigned int res_x, res_y;
+ unsigned int max_x, max_y;
+ unsigned int tool;
+ unsigned int idx;
+ u8 data[DATA_SIZE];
+ char phys[32];
+};
+
+enum {
+ MODEL_CINTIQ = 0x504C, /* PL */
+ MODEL_CINTIQ2 = 0x4454, /* DT */
+ MODEL_DIGITIZER_II = 0x5544, /* UD */
+ MODEL_GRAPHIRE = 0x4554, /* ET */
+ MODEL_PENPARTNER = 0x4354, /* CT */
+};
+
+static void wacom_handle_model_response(struct wacom *wacom)
+{
+ int major_v, minor_v, r = 0;
+ char *p;
+
+ p = strrchr(wacom->data, 'V');
+ if (p)
+ r = sscanf(p + 1, "%u.%u", &major_v, &minor_v);
+ if (r != 2)
+ major_v = minor_v = 0;
+
+ switch (wacom->data[2] << 8 | wacom->data[3]) {
+ case MODEL_CINTIQ: /* UNTESTED */
+ case MODEL_CINTIQ2:
+ if ((wacom->data[2] << 8 | wacom->data[3]) == MODEL_CINTIQ) {
+ wacom->dev->name = "Wacom Cintiq";
+ wacom->dev->id.version = MODEL_CINTIQ;
+ } else {
+ wacom->dev->name = "Wacom Cintiq II";
+ wacom->dev->id.version = MODEL_CINTIQ2;
+ }
+ wacom->res_x = 508;
+ wacom->res_y = 508;
+
+ switch (wacom->data[5] << 8 | wacom->data[6]) {
+ case 0x3731: /* PL-710 */
+ wacom->res_x = 2540;
+ wacom->res_y = 2540;
+ /* fall through */
+ case 0x3535: /* PL-550 */
+ case 0x3830: /* PL-800 */
+ wacom->extra_z_bits = 2;
+ }
+
+ wacom->flags = F_COVERS_SCREEN;
+ break;
+
+ case MODEL_PENPARTNER:
+ wacom->dev->name = "Wacom Penpartner";
+ wacom->dev->id.version = MODEL_PENPARTNER;
+ wacom->res_x = 1000;
+ wacom->res_y = 1000;
+ break;
+
+ case MODEL_GRAPHIRE:
+ wacom->dev->name = "Wacom Graphire";
+ wacom->dev->id.version = MODEL_GRAPHIRE;
+ wacom->res_x = 1016;
+ wacom->res_y = 1016;
+ wacom->max_x = 5103;
+ wacom->max_y = 3711;
+ wacom->extra_z_bits = 2;
+ wacom->eraser_mask = 0x08;
+ wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
+ break;
+
+ case MODEL_DIGITIZER_II:
+ wacom->dev->name = "Wacom Digitizer II";
+ wacom->dev->id.version = MODEL_DIGITIZER_II;
+ if (major_v == 1 && minor_v <= 2)
+ wacom->extra_z_bits = 0; /* UNTESTED */
+ break;
+
+ default:
+ dev_err(&wacom->dev->dev, "Unsupported Wacom model %s\n",
+ wacom->data);
+ wacom->result = -ENODEV;
+ return;
+ }
+
+ dev_info(&wacom->dev->dev, "%s tablet, version %u.%u\n",
+ wacom->dev->name, major_v, minor_v);
+}
+
+static void wacom_handle_configuration_response(struct wacom *wacom)
+{
+ int r, skip;
+
+ dev_dbg(&wacom->dev->dev, "Configuration string: %s\n", wacom->data);
+ r = sscanf(wacom->data, "~R%x,%u,%u,%u,%u", &skip, &skip, &skip,
+ &wacom->res_x, &wacom->res_y);
+ if (r != 5)
+ dev_warn(&wacom->dev->dev, "could not get resolution\n");
+}
+
+static void wacom_handle_coordinates_response(struct wacom *wacom)
+{
+ int r;
+
+ dev_dbg(&wacom->dev->dev, "Coordinates string: %s\n", wacom->data);
+ r = sscanf(wacom->data, "~C%u,%u", &wacom->max_x, &wacom->max_y);
+ if (r != 2)
+ dev_warn(&wacom->dev->dev, "could not get max coordinates\n");
+}
+
+static void wacom_handle_response(struct wacom *wacom)
+{
+ if (wacom->data[0] != '~' || wacom->data[1] != wacom->expect) {
+ dev_err(&wacom->dev->dev,
+ "Wacom got an unexpected response: %s\n", wacom->data);
+ wacom->result = -EIO;
+ } else {
+ wacom->result = 0;
+
+ switch (wacom->data[1]) {
+ case '#':
+ wacom_handle_model_response(wacom);
+ break;
+ case 'R':
+ wacom_handle_configuration_response(wacom);
+ break;
+ case 'C':
+ wacom_handle_coordinates_response(wacom);
+ break;
+ }
+ }
+
+ complete(&wacom->cmd_done);
+}
+
+static void wacom_handle_packet(struct wacom *wacom)
+{
+ u8 in_proximity_p, stylus_p, button;
+ unsigned int tool;
+ int x, y, z;
+
+ in_proximity_p = wacom->data[0] & 0x40;
+ stylus_p = wacom->data[0] & 0x20;
+ button = (wacom->data[3] & 0x78) >> 3;
+ x = (wacom->data[0] & 3) << 14 | wacom->data[1]<<7 | wacom->data[2];
+ y = (wacom->data[3] & 3) << 14 | wacom->data[4]<<7 | wacom->data[5];
+
+ if (in_proximity_p && stylus_p) {
+ z = wacom->data[6] & 0x7f;
+ if (wacom->extra_z_bits >= 1)
+ z = z << 1 | (wacom->data[3] & 0x4) >> 2;
+ if (wacom->extra_z_bits > 1)
+ z = z << 1 | (wacom->data[0] & 0x4) >> 2;
+ z = z ^ (0x40 << wacom->extra_z_bits);
+ } else {
+ z = -1;
+ }
+
+ if (stylus_p)
+ tool = (button & wacom->eraser_mask) ? ERASER : STYLUS;
+ else
+ tool = CURSOR;
+
+ if (tool != wacom->tool && wacom->tool != 0) {
+ input_report_key(wacom->dev, tools[wacom->tool].input_id, 0);
+ input_sync(wacom->dev);
+ }
+ wacom->tool = tool;
+
+ input_report_key(wacom->dev, tools[tool].input_id, in_proximity_p);
+ input_report_abs(wacom->dev, ABS_MISC,
+ in_proximity_p ? tools[tool].device_id : 0);
+ input_report_abs(wacom->dev, ABS_X, x);
+ input_report_abs(wacom->dev, ABS_Y, y);
+ input_report_abs(wacom->dev, ABS_PRESSURE, z);
+ if (stylus_p) {
+ input_report_key(wacom->dev, BTN_TOUCH, button & 1);
+ input_report_key(wacom->dev, BTN_STYLUS, button & 2);
+ input_report_key(wacom->dev, BTN_STYLUS2, button & 4);
+ } else {
+ input_report_key(wacom->dev, BTN_LEFT, button & 1);
+ input_report_key(wacom->dev, BTN_RIGHT, button & 2);
+ input_report_key(wacom->dev, BTN_MIDDLE, button & 4);
+ /* handle relative wheel for non-stylus device */
+ z = (wacom->data[6] & 0x30) >> 4;
+ if (wacom->data[6] & 0x40)
+ z = -z;
+ input_report_rel(wacom->dev, REL_WHEEL, z);
+ }
+ input_sync(wacom->dev);
+}
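
A worked example of the pressure decode above, as a stand-alone sketch with made-up bytes (extra_z_bits == 1, the default set in wacom_connect):

	#include <stdio.h>

	int main(void)
	{
		unsigned int extra_z_bits = 1;
		unsigned char data6 = 0x40, data3 = 0x00;	/* hypothetical raw bytes */
		int z;

		z = data6 & 0x7f;			/* sign bit plus P7..P2 */
		z = z << 1 | (data3 & 0x4) >> 2;	/* append P1 */
		z = z ^ (0x40 << extra_z_bits);		/* offset binary -> 0..255 */

		printf("pressure = %d\n", z);		/* 0, the minimum this decode produces */
		return 0;
	}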
+
+static void wacom_clear_data_buf(struct wacom *wacom)
+{
+ memset(wacom->data, 0, DATA_SIZE);
+ wacom->idx = 0;
+}
+
+static irqreturn_t wacom_interrupt(struct serio *serio, unsigned char data,
+ unsigned int flags)
+{
+ struct wacom *wacom = serio_get_drvdata(serio);
+
+ if (data & 0x80)
+ wacom->idx = 0;
+
+ /*
+ * We're either expecting a carriage return-terminated ASCII
+ * response string, or a seven-byte packet with the MSB set on
+ * the first byte.
+ *
+ * Note however that some tablets (the PenPartner, for
+ * example) don't send a carriage return at the end of a
+ * command. We handle these by waiting for a timeout.
+ */
+ if (data == '\r' && !(wacom->data[0] & 0x80)) {
+ wacom_handle_response(wacom);
+ wacom_clear_data_buf(wacom);
+ return IRQ_HANDLED;
+ }
+
+ /* Leave room for 0 termination */
+ if (wacom->idx > (DATA_SIZE - 2)) {
+ dev_dbg(&wacom->dev->dev,
+ "throwing away %d bytes of garbage\n", wacom->idx);
+ wacom_clear_data_buf(wacom);
+ }
+ wacom->data[wacom->idx++] = data;
+
+ if (wacom->idx == PACKET_LENGTH && (wacom->data[0] & 0x80)) {
+ wacom_handle_packet(wacom);
+ wacom_clear_data_buf(wacom);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void wacom_disconnect(struct serio *serio)
+{
+ struct wacom *wacom = serio_get_drvdata(serio);
+
+ serio_close(serio);
+ serio_set_drvdata(serio, NULL);
+ input_unregister_device(wacom->dev);
+ kfree(wacom);
+}
+
+static int wacom_send(struct serio *serio, const u8 *command)
+{
+ int err = 0;
+
+ for (; !err && *command; command++)
+ err = serio_write(serio, *command);
+
+ return err;
+}
+
+static int wacom_send_setup_string(struct wacom *wacom, struct serio *serio)
+{
+ const u8 *cmd;
+
+ switch (wacom->dev->id.version) {
+ case MODEL_CINTIQ: /* UNTESTED */
+ cmd = COMMAND_ORIGIN_IN_UPPER_LEFT
+ COMMAND_TRANSMIT_AT_MAX_RATE
+ COMMAND_ENABLE_CONTINUOUS_MODE
+ COMMAND_START_SENDING_PACKETS;
+ break;
+
+ case MODEL_PENPARTNER:
+ cmd = COMMAND_ENABLE_PRESSURE_MODE
+ COMMAND_START_SENDING_PACKETS;
+ break;
+
+ default:
+ cmd = COMMAND_MULTI_MODE_INPUT
+ COMMAND_ORIGIN_IN_UPPER_LEFT
+ COMMAND_ENABLE_ALL_MACRO_BUTTONS
+ COMMAND_DISABLE_GROUP_1_MACRO_BUTTONS
+ COMMAND_TRANSMIT_AT_MAX_RATE
+ COMMAND_DISABLE_INCREMENTAL_MODE
+ COMMAND_ENABLE_CONTINUOUS_MODE
+ COMMAND_Z_FILTER
+ COMMAND_START_SENDING_PACKETS;
+ break;
+ }
+
+ return wacom_send(serio, cmd);
+}
+
+static int wacom_send_and_wait(struct wacom *wacom, struct serio *serio,
+ const u8 *cmd, const char *desc)
+{
+ int err;
+ unsigned long u;
+
+ wacom->expect = cmd[1];
+ init_completion(&wacom->cmd_done);
+
+ err = wacom_send(serio, cmd);
+ if (err)
+ return err;
+
+ u = wait_for_completion_timeout(&wacom->cmd_done, HZ);
+ if (u == 0) {
+ /* Timeout, process what we've received. */
+ wacom_handle_response(wacom);
+ }
+
+ wacom->expect = 0;
+ return wacom->result;
+}
+
+static int wacom_setup(struct wacom *wacom, struct serio *serio)
+{
+ int err;
+
+ /* Note that setting the link speed is the job of inputattach.
+ * We assume that reset negotiation has already happened here. */
+ err = wacom_send_and_wait(wacom, serio, REQUEST_MODEL_AND_ROM_VERSION,
+ "model and version");
+ if (err)
+ return err;
+
+ if (!(wacom->res_x && wacom->res_y)) {
+ err = wacom_send_and_wait(wacom, serio,
+ REQUEST_CONFIGURATION_STRING,
+ "configuration string");
+ if (err)
+ return err;
+ }
+
+ if (!(wacom->max_x && wacom->max_y)) {
+ err = wacom_send_and_wait(wacom, serio,
+ REQUEST_MAX_COORDINATES,
+ "coordinates string");
+ if (err)
+ return err;
+ }
+
+ return wacom_send_setup_string(wacom, serio);
+}
+
+static int wacom_connect(struct serio *serio, struct serio_driver *drv)
+{
+ struct wacom *wacom;
+ struct input_dev *input_dev;
+ int err = -ENOMEM;
+
+ wacom = kzalloc(sizeof(struct wacom), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!wacom || !input_dev)
+ goto free_device;
+
+ wacom->dev = input_dev;
+ wacom->extra_z_bits = 1;
+ wacom->eraser_mask = 0x04;
+ wacom->tool = wacom->idx = 0;
+ snprintf(wacom->phys, sizeof(wacom->phys), "%s/input0", serio->phys);
+ input_dev->phys = wacom->phys;
+ input_dev->id.bustype = BUS_RS232;
+ input_dev->id.vendor = SERIO_WACOM_IV;
+ input_dev->id.product = serio->id.extra;
+ input_dev->dev.parent = &serio->dev;
+
+ input_dev->evbit[0] =
+ BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) | BIT_MASK(EV_REL);
+ set_bit(ABS_MISC, input_dev->absbit);
+ set_bit(BTN_TOOL_PEN, input_dev->keybit);
+ set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
+ set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(BTN_STYLUS, input_dev->keybit);
+ set_bit(BTN_LEFT, input_dev->keybit);
+ set_bit(BTN_RIGHT, input_dev->keybit);
+ set_bit(BTN_MIDDLE, input_dev->keybit);
+
+ serio_set_drvdata(serio, wacom);
+
+ err = serio_open(serio, drv);
+ if (err)
+ goto free_device;
+
+ err = wacom_setup(wacom, serio);
+ if (err)
+ goto close_serio;
+
+ set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ if (!(wacom->flags & F_COVERS_SCREEN))
+ __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
+
+ if (wacom->flags & F_HAS_STYLUS2)
+ __set_bit(BTN_STYLUS2, input_dev->keybit);
+
+ if (wacom->flags & F_HAS_SCROLLWHEEL)
+ __set_bit(REL_WHEEL, input_dev->relbit);
+
+ input_abs_set_res(wacom->dev, ABS_X, wacom->res_x);
+ input_abs_set_res(wacom->dev, ABS_Y, wacom->res_y);
+ input_set_abs_params(wacom->dev, ABS_X, 0, wacom->max_x, 0, 0);
+ input_set_abs_params(wacom->dev, ABS_Y, 0, wacom->max_y, 0, 0);
+ input_set_abs_params(wacom->dev, ABS_PRESSURE, -1,
+ (1 << (7 + wacom->extra_z_bits)) - 1, 0, 0);
+
+ err = input_register_device(wacom->dev);
+ if (err)
+ goto close_serio;
+
+ return 0;
+
+close_serio:
+ serio_close(serio);
+free_device:
+ serio_set_drvdata(serio, NULL);
+ input_free_device(input_dev);
+ kfree(wacom);
+ return err;
+}
+
+static struct serio_device_id wacom_serio_ids[] = {
+ {
+ .type = SERIO_RS232,
+ .proto = SERIO_WACOM_IV,
+ .id = SERIO_ANY,
+ .extra = SERIO_ANY,
+ },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(serio, wacom_serio_ids);
+
+static struct serio_driver wacom_drv = {
+ .driver = {
+ .name = "wacom_serial4",
+ },
+ .description = "Wacom protocol 4 serial tablet driver",
+ .id_table = wacom_serio_ids,
+ .interrupt = wacom_interrupt,
+ .connect = wacom_connect,
+ .disconnect = wacom_disconnect,
+};
+
+module_serio_driver(wacom_drv);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index a23a94bb4bcb..6bb9a7dd23b6 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -471,6 +471,18 @@ config TOUCHSCREEN_HP7XX
To compile this driver as a module, choose M here: the
module will be called jornada720_ts.
+config TOUCHSCREEN_IPAQ_MICRO
+ tristate "HP iPAQ Atmel Micro ASIC touchscreen"
+ depends on MFD_IPAQ_MICRO
+ help
+ Say Y here to enable support for the touchscreen attached to
+ the Atmel Micro peripheral controller on iPAQ h3100/h3600/h3700.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ipaq-micro-ts.
+
config TOUCHSCREEN_HTCPEN
tristate "HTC Shift X9500 touchscreen"
depends on ISA
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 126479d8c29a..4be94fce41af 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TOUCHSCREEN_MTOUCH) += mtouch.o
obj-$(CONFIG_TOUCHSCREEN_MK712) += mk712.o
obj-$(CONFIG_TOUCHSCREEN_HP600) += hp680_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_HP7XX) += jornada720_ts.o
+obj-$(CONFIG_TOUCHSCREEN_IPAQ_MICRO) += ipaq-micro-ts.o
obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index da201b8e37dc..e57ba52bf484 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -1302,8 +1302,10 @@ static int ads7846_probe(struct spi_device *spi)
pdata = dev_get_platdata(&spi->dev);
if (!pdata) {
pdata = ads7846_probe_dt(&spi->dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ if (IS_ERR(pdata)) {
+ err = PTR_ERR(pdata);
+ goto err_free_mem;
+ }
}
ts->model = pdata->model ? : 7846;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 6e0b4a2120d3..03b85711cb70 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2,6 +2,7 @@
* Atmel maXTouch Touchscreen driver
*
* Copyright (C) 2010 Samsung Electronics Co.Ltd
+ * Copyright (C) 2011-2014 Atmel Corporation
* Copyright (C) 2012 Google, Inc.
*
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
@@ -22,6 +23,7 @@
#include <linux/i2c/atmel_mxt_ts.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/slab.h>
/* Version */
@@ -29,8 +31,10 @@
#define MXT_VER_21 21
#define MXT_VER_22 22
-/* Firmware */
+/* Firmware files */
#define MXT_FW_NAME "maxtouch.fw"
+#define MXT_CFG_NAME "maxtouch.cfg"
+#define MXT_CFG_MAGIC "OBP_RAW V1"
/* Registers */
#define MXT_INFO 0x00
@@ -44,6 +48,8 @@
#define MXT_OBJECT_START 0x07
#define MXT_OBJECT_SIZE 6
+#define MXT_INFO_CHECKSUM_SIZE 3
+#define MXT_MAX_BLOCK_WRITE 256
/* Object types */
#define MXT_DEBUG_DIAGNOSTIC_T37 37
@@ -74,6 +80,9 @@
#define MXT_SPT_MESSAGECOUNT_T44 44
#define MXT_SPT_CTECONFIG_T46 46
+/* MXT_GEN_MESSAGE_T5 object */
+#define MXT_RPTID_NOMSG 0xff
+
/* MXT_GEN_COMMAND_T6 field */
#define MXT_COMMAND_RESET 0
#define MXT_COMMAND_BACKUPNV 1
@@ -83,11 +92,20 @@
/* Define for T6 status byte */
#define MXT_T6_STATUS_RESET (1 << 7)
+#define MXT_T6_STATUS_OFL (1 << 6)
+#define MXT_T6_STATUS_SIGERR (1 << 5)
+#define MXT_T6_STATUS_CAL (1 << 4)
+#define MXT_T6_STATUS_CFGERR (1 << 3)
+#define MXT_T6_STATUS_COMSERR (1 << 2)
/* MXT_GEN_POWER_T7 field */
-#define MXT_POWER_IDLEACQINT 0
-#define MXT_POWER_ACTVACQINT 1
-#define MXT_POWER_ACTV2IDLETO 2
+struct t7_config {
+ u8 idle;
+ u8 active;
+} __packed;
+
+#define MXT_POWER_CFG_RUN 0
+#define MXT_POWER_CFG_DEEPSLEEP 1
/* MXT_GEN_ACQUIRE_T8 field */
#define MXT_ACQUIRE_CHRGTIME 0
@@ -99,7 +117,6 @@
#define MXT_ACQUIRE_ATCHCALSTHR 7
/* MXT_TOUCH_MULTI_T9 field */
-#define MXT_TOUCH_CTRL 0
#define MXT_T9_ORIENT 9
#define MXT_T9_RANGE 18
@@ -217,11 +234,6 @@ struct mxt_object {
u8 num_report_ids;
} __packed;
-struct mxt_message {
- u8 reportid;
- u8 message[7];
-};
-
/* Each client has this additional data */
struct mxt_data {
struct i2c_client *client;
@@ -234,15 +246,28 @@ struct mxt_data {
unsigned int max_x;
unsigned int max_y;
bool in_bootloader;
+ u16 mem_size;
+ u8 max_reportid;
u32 config_crc;
+ u32 info_crc;
u8 bootloader_addr;
+ u8 *msg_buf;
+ u8 t6_status;
+ bool update_input;
+ u8 last_message_count;
+ u8 num_touchids;
+ struct t7_config t7_cfg;
/* Cached parameters from object table */
+ u16 T5_address;
+ u8 T5_msg_size;
u8 T6_reportid;
u16 T6_address;
+ u16 T7_address;
u8 T9_reportid_min;
u8 T9_reportid_max;
u8 T19_reportid;
+ u16 T44_address;
/* for fw update in bootloader */
struct completion bl_completion;
@@ -297,42 +322,10 @@ static bool mxt_object_readable(unsigned int type)
}
}
-static bool mxt_object_writable(unsigned int type)
-{
- switch (type) {
- case MXT_GEN_COMMAND_T6:
- case MXT_GEN_POWER_T7:
- case MXT_GEN_ACQUIRE_T8:
- case MXT_TOUCH_MULTI_T9:
- case MXT_TOUCH_KEYARRAY_T15:
- case MXT_TOUCH_PROXIMITY_T23:
- case MXT_TOUCH_PROXKEY_T52:
- case MXT_PROCI_GRIPFACE_T20:
- case MXT_PROCG_NOISE_T22:
- case MXT_PROCI_ONETOUCH_T24:
- case MXT_PROCI_TWOTOUCH_T27:
- case MXT_PROCI_GRIP_T40:
- case MXT_PROCI_PALM_T41:
- case MXT_PROCI_TOUCHSUPPRESSION_T42:
- case MXT_PROCI_STYLUS_T47:
- case MXT_PROCG_NOISESUPPRESSION_T48:
- case MXT_SPT_COMMSCONFIG_T18:
- case MXT_SPT_GPIOPWM_T19:
- case MXT_SPT_SELFTEST_T25:
- case MXT_SPT_CTECONFIG_T28:
- case MXT_SPT_DIGITIZER_T43:
- case MXT_SPT_CTECONFIG_T46:
- return true;
- default:
- return false;
- }
-}
-
-static void mxt_dump_message(struct device *dev,
- struct mxt_message *message)
+static void mxt_dump_message(struct mxt_data *data, u8 *message)
{
- dev_dbg(dev, "reportid: %u\tmessage: %*ph\n",
- message->reportid, 7, message->message);
+ dev_dbg(&data->client->dev, "message: %*ph\n",
+ data->T5_msg_size, message);
}
static int mxt_wait_for_completion(struct mxt_data *data,
@@ -401,7 +394,7 @@ static int mxt_bootloader_write(struct mxt_data *data,
return ret;
}
-static int mxt_lookup_bootloader_address(struct mxt_data *data)
+static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
{
u8 appmode = data->client->addr;
u8 bootloader;
@@ -409,6 +402,12 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data)
switch (appmode) {
case 0x4a:
case 0x4b:
+ /* Chips after the 1664S use a different scheme */
+ if (retry || data->info.family_id >= 0xa2) {
+ bootloader = appmode - 0x24;
+ break;
+ }
+ /* Fall through for normal case */
case 0x4c:
case 0x4d:
case 0x5a:
@@ -426,6 +425,30 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data)
return 0;
}
+static int mxt_probe_bootloader(struct mxt_data *data, bool retry)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+ u8 val;
+ bool crc_failure;
+
+ ret = mxt_lookup_bootloader_address(data, retry);
+ if (ret)
+ return ret;
+
+ ret = mxt_bootloader_read(data, &val, 1);
+ if (ret)
+ return ret;
+
+ /* Check app crc fail mode */
+ crc_failure = (val & ~MXT_BOOT_STATUS_MASK) == MXT_APP_CRC_FAIL;
+
+ dev_err(dev, "Detected bootloader, status:%02X%s\n",
+ val, crc_failure ? ", APP_CRC_FAIL" : "");
+
+ return 0;
+}
+
static u8 mxt_get_bootloader_version(struct mxt_data *data, u8 val)
{
struct device *dev = &data->client->dev;
@@ -447,14 +470,15 @@ static u8 mxt_get_bootloader_version(struct mxt_data *data, u8 val)
}
}
-static int mxt_check_bootloader(struct mxt_data *data, unsigned int state)
+static int mxt_check_bootloader(struct mxt_data *data, unsigned int state,
+ bool wait)
{
struct device *dev = &data->client->dev;
u8 val;
int ret;
recheck:
- if (state != MXT_WAITING_BOOTLOAD_CMD) {
+ if (wait) {
/*
* In application update mode, the interrupt
* line signals state transitions. We must wait for the
@@ -485,6 +509,7 @@ recheck:
switch (state) {
case MXT_WAITING_BOOTLOAD_CMD:
case MXT_WAITING_FRAME_DATA:
+ case MXT_APP_CRC_FAIL:
val &= ~MXT_BOOT_STATUS_MASK;
break;
case MXT_FRAME_CRC_PASS:
@@ -508,13 +533,18 @@ recheck:
return 0;
}
-static int mxt_unlock_bootloader(struct mxt_data *data)
+static int mxt_send_bootloader_cmd(struct mxt_data *data, bool unlock)
{
int ret;
u8 buf[2];
- buf[0] = MXT_UNLOCK_CMD_LSB;
- buf[1] = MXT_UNLOCK_CMD_MSB;
+ if (unlock) {
+ buf[0] = MXT_UNLOCK_CMD_LSB;
+ buf[1] = MXT_UNLOCK_CMD_MSB;
+ } else {
+ buf[0] = 0x01;
+ buf[1] = 0x01;
+ }
ret = mxt_bootloader_write(data, buf, 2);
if (ret)
@@ -605,40 +635,44 @@ mxt_get_object(struct mxt_data *data, u8 type)
return object;
}
- dev_err(&data->client->dev, "Invalid object type T%u\n", type);
+ dev_warn(&data->client->dev, "Invalid object type T%u\n", type);
return NULL;
}
-static int mxt_read_message(struct mxt_data *data,
- struct mxt_message *message)
+static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
{
- struct mxt_object *object;
- u16 reg;
-
- object = mxt_get_object(data, MXT_GEN_MESSAGE_T5);
- if (!object)
- return -EINVAL;
-
- reg = object->start_address;
- return __mxt_read_reg(data->client, reg,
- sizeof(struct mxt_message), message);
-}
+ struct device *dev = &data->client->dev;
+ u8 status = msg[1];
+ u32 crc = msg[2] | (msg[3] << 8) | (msg[4] << 16);
-static int mxt_write_object(struct mxt_data *data,
- u8 type, u8 offset, u8 val)
-{
- struct mxt_object *object;
- u16 reg;
+ complete(&data->crc_completion);
- object = mxt_get_object(data, type);
- if (!object || offset >= mxt_obj_size(object))
- return -EINVAL;
+ if (crc != data->config_crc) {
+ data->config_crc = crc;
+ dev_dbg(dev, "T6 Config Checksum: 0x%06X\n", crc);
+ }
- reg = object->start_address;
- return mxt_write_reg(data->client, reg + offset, val);
+ /* Detect reset */
+ if (status & MXT_T6_STATUS_RESET)
+ complete(&data->reset_completion);
+
+ /* Output debug if status has changed */
+ if (status != data->t6_status)
+ dev_dbg(dev, "T6 Status 0x%02X%s%s%s%s%s%s%s\n",
+ status,
+ status == 0 ? " OK" : "",
+ status & MXT_T6_STATUS_RESET ? " RESET" : "",
+ status & MXT_T6_STATUS_OFL ? " OFL" : "",
+ status & MXT_T6_STATUS_SIGERR ? " SIGERR" : "",
+ status & MXT_T6_STATUS_CAL ? " CAL" : "",
+ status & MXT_T6_STATUS_CFGERR ? " CFGERR" : "",
+ status & MXT_T6_STATUS_COMSERR ? " COMSERR" : "");
+
+ /* Save current status */
+ data->t6_status = status;
}
-static void mxt_input_button(struct mxt_data *data, struct mxt_message *message)
+static void mxt_input_button(struct mxt_data *data, u8 *message)
{
struct input_dev *input = data->input_dev;
const struct mxt_platform_data *pdata = data->pdata;
@@ -649,30 +683,33 @@ static void mxt_input_button(struct mxt_data *data, struct mxt_message *message)
for (i = 0; i < pdata->t19_num_keys; i++) {
if (pdata->t19_keymap[i] == KEY_RESERVED)
continue;
- button = !(message->message[0] & (1 << i));
+ button = !(message[1] & (1 << i));
input_report_key(input, pdata->t19_keymap[i], button);
}
}
-static void mxt_input_sync(struct input_dev *input_dev)
+static void mxt_input_sync(struct mxt_data *data)
{
- input_mt_report_pointer_emulation(input_dev, false);
- input_sync(input_dev);
+ input_mt_report_pointer_emulation(data->input_dev,
+ data->pdata->t19_num_keys);
+ input_sync(data->input_dev);
}
-static void mxt_input_touchevent(struct mxt_data *data,
- struct mxt_message *message, int id)
+static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
{
struct device *dev = &data->client->dev;
- u8 status = message->message[0];
struct input_dev *input_dev = data->input_dev;
+ int id;
+ u8 status;
int x;
int y;
int area;
int amplitude;
- x = (message->message[1] << 4) | ((message->message[3] >> 4) & 0xf);
- y = (message->message[2] << 4) | ((message->message[3] & 0xf));
+ id = message[0] - data->T9_reportid_min;
+ status = message[1];
+ x = (message[2] << 4) | ((message[4] >> 4) & 0xf);
+ y = (message[3] << 4) | ((message[4] & 0xf));
/* Handle 10/12 bit switching */
if (data->max_x < 1024)
@@ -680,8 +717,8 @@ static void mxt_input_touchevent(struct mxt_data *data,
if (data->max_y < 1024)
y >>= 2;
- area = message->message[4];
- amplitude = message->message[5];
+ area = message[5];
+ amplitude = message[6];
dev_dbg(dev,
"[%u] %c%c%c%c%c%c%c%c x: %5u y: %5u area: %3u amp: %3u\n",
@@ -707,7 +744,7 @@ static void mxt_input_touchevent(struct mxt_data *data,
if (status & MXT_T9_RELEASE) {
input_mt_report_slot_state(input_dev,
MT_TOOL_FINGER, 0);
- mxt_input_sync(input_dev);
+ mxt_input_sync(data);
}
/* Touch active */
@@ -720,64 +757,179 @@ static void mxt_input_touchevent(struct mxt_data *data,
/* Touch no longer active, close out slot */
input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 0);
}
+
+ data->update_input = true;
+}
+
+static int mxt_proc_message(struct mxt_data *data, u8 *message)
+{
+ u8 report_id = message[0];
+
+ if (report_id == MXT_RPTID_NOMSG)
+ return 0;
+
+ if (report_id == data->T6_reportid) {
+ mxt_proc_t6_messages(data, message);
+ } else if (!data->input_dev) {
+ /*
+ * Do not report events if input device
+ * is not yet registered.
+ */
+ mxt_dump_message(data, message);
+ } else if (report_id >= data->T9_reportid_min
+ && report_id <= data->T9_reportid_max) {
+ mxt_proc_t9_message(data, message);
+ } else if (report_id == data->T19_reportid) {
+ mxt_input_button(data, message);
+ data->update_input = true;
+ } else {
+ mxt_dump_message(data, message);
+ }
+
+ return 1;
}
-static u16 mxt_extract_T6_csum(const u8 *csum)
+static int mxt_read_and_process_messages(struct mxt_data *data, u8 count)
{
- return csum[0] | (csum[1] << 8) | (csum[2] << 16);
+ struct device *dev = &data->client->dev;
+ int ret;
+ int i;
+ u8 num_valid = 0;
+
+ /* Safety check for msg_buf */
+ if (count > data->max_reportid)
+ return -EINVAL;
+
+ /* Read the requested messages in one transfer and process them */
+ ret = __mxt_read_reg(data->client, data->T5_address,
+ data->T5_msg_size * count, data->msg_buf);
+ if (ret) {
+ dev_err(dev, "Failed to read %u messages (%d)\n", count, ret);
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ ret = mxt_proc_message(data,
+ data->msg_buf + data->T5_msg_size * i);
+
+ if (ret == 1)
+ num_valid++;
+ }
+
+ /* return number of messages read */
+ return num_valid;
}
-static bool mxt_is_T9_message(struct mxt_data *data, struct mxt_message *msg)
+static irqreturn_t mxt_process_messages_t44(struct mxt_data *data)
{
- u8 id = msg->reportid;
- return (id >= data->T9_reportid_min && id <= data->T9_reportid_max);
+ struct device *dev = &data->client->dev;
+ int ret;
+ u8 count, num_left;
+
+ /* Read T44 and T5 together */
+ ret = __mxt_read_reg(data->client, data->T44_address,
+ data->T5_msg_size + 1, data->msg_buf);
+ if (ret) {
+ dev_err(dev, "Failed to read T44 and T5 (%d)\n", ret);
+ return IRQ_NONE;
+ }
+
+ count = data->msg_buf[0];
+
+ if (count == 0) {
+ dev_warn(dev, "Interrupt triggered but zero messages\n");
+ return IRQ_NONE;
+ } else if (count > data->max_reportid) {
+ dev_err(dev, "T44 count %d exceeded max report id\n", count);
+ count = data->max_reportid;
+ }
+
+ /* Process first message */
+ ret = mxt_proc_message(data, data->msg_buf + 1);
+ if (ret < 0) {
+ dev_warn(dev, "Unexpected invalid message\n");
+ return IRQ_NONE;
+ }
+
+ num_left = count - 1;
+
+ /* Process remaining messages if necessary */
+ if (num_left) {
+ ret = mxt_read_and_process_messages(data, num_left);
+ if (ret < 0)
+ goto end;
+ else if (ret != num_left)
+ dev_warn(dev, "Unexpected invalid message\n");
+ }
+
+end:
+ if (data->update_input) {
+ mxt_input_sync(data);
+ data->update_input = false;
+ }
+
+ return IRQ_HANDLED;
}
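
The T44 path above amortises bus traffic: a single read returns the pending-message count together with the first T5 message, and only a non-empty remainder costs a second burst read. A host-side sketch of that buffering scheme; the register layout, sizes and sample contents are simulated, not the real chip's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define T5_MSG_SIZE  8		/* assumed message size for this sketch */
#define MAX_REPORTID 10

/* Simulated device memory: byte 0 is the T44 count, T5 messages follow.
 * (On the real chip T5 is a FIFO at a fixed address; here the queue is
 * laid out flat so the sketch stays self-contained.) */
static uint8_t regs[1 + MAX_REPORTID * T5_MSG_SIZE];

static void read_regs(unsigned off, unsigned len, uint8_t *buf)
{
	memcpy(buf, regs + off, len);	/* stands in for the I2C register read */
}

static void process_pending(void)
{
	uint8_t buf[1 + MAX_REPORTID * T5_MSG_SIZE];
	unsigned count, left, i;

	/* One transfer fetches the count byte plus the first message */
	read_regs(0, 1 + T5_MSG_SIZE, buf);
	count = buf[0];
	if (count == 0 || count > MAX_REPORTID)
		return;

	printf("message 1, report id %d\n", buf[1]);

	left = count - 1;
	if (left) {
		/* Second transfer: the remaining messages in one burst */
		read_regs(1 + T5_MSG_SIZE, left * T5_MSG_SIZE, buf);
		for (i = 0; i < left; i++)
			printf("message %u, report id %d\n",
			       i + 2, buf[i * T5_MSG_SIZE]);
	}
}

int main(void)
{
	regs[0] = 3;			/* three messages pending */
	regs[1] = 2;			/* report IDs of the queued messages */
	regs[1 + T5_MSG_SIZE] = 3;
	regs[1 + 2 * T5_MSG_SIZE] = 4;
	process_pending();
	return 0;
}
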
-static irqreturn_t mxt_process_messages_until_invalid(struct mxt_data *data)
+static int mxt_process_messages_until_invalid(struct mxt_data *data)
{
- struct mxt_message message;
- const u8 *payload = &message.message[0];
struct device *dev = &data->client->dev;
- u8 reportid;
- bool update_input = false;
- u32 crc;
+ int count, read;
+ u8 tries = 2;
+ count = data->max_reportid;
+
+ /* Read messages until we force an invalid */
do {
- if (mxt_read_message(data, &message)) {
- dev_err(dev, "Failed to read message\n");
- return IRQ_NONE;
- }
+ read = mxt_read_and_process_messages(data, count);
+ if (read < count)
+ return 0;
+ } while (--tries);
- reportid = message.reportid;
+ if (data->update_input) {
+ mxt_input_sync(data);
+ data->update_input = false;
+ }
- if (reportid == data->T6_reportid) {
- u8 status = payload[0];
+ dev_err(dev, "CHG pin isn't cleared\n");
+ return -EBUSY;
+}
- crc = mxt_extract_T6_csum(&payload[1]);
- if (crc != data->config_crc) {
- data->config_crc = crc;
- complete(&data->crc_completion);
- }
+static irqreturn_t mxt_process_messages(struct mxt_data *data)
+{
+ int total_handled, num_handled;
+ u8 count = data->last_message_count;
- dev_dbg(dev, "Status: %02x Config Checksum: %06x\n",
- status, data->config_crc);
-
- if (status & MXT_T6_STATUS_RESET)
- complete(&data->reset_completion);
- } else if (mxt_is_T9_message(data, &message)) {
- int id = reportid - data->T9_reportid_min;
- mxt_input_touchevent(data, &message, id);
- update_input = true;
- } else if (message.reportid == data->T19_reportid) {
- mxt_input_button(data, &message);
- update_input = true;
- } else {
- mxt_dump_message(dev, &message);
- }
- } while (reportid != 0xff);
+ if (count < 1 || count > data->max_reportid)
+ count = 1;
- if (update_input)
- mxt_input_sync(data->input_dev);
+ /* include final invalid message */
+ total_handled = mxt_read_and_process_messages(data, count + 1);
+ if (total_handled < 0)
+ return IRQ_NONE;
+ /* if there were invalid messages, then we are done */
+ else if (total_handled <= count)
+ goto update_count;
+
+ /* keep reading two msgs until one is invalid or reportid limit */
+ do {
+ num_handled = mxt_read_and_process_messages(data, 2);
+ if (num_handled < 0)
+ return IRQ_NONE;
+
+ total_handled += num_handled;
+
+ if (num_handled < 2)
+ break;
+ } while (total_handled < data->num_touchids);
+
+update_count:
+ data->last_message_count = total_handled;
+
+ if (data->update_input) {
+ mxt_input_sync(data);
+ data->update_input = false;
+ }
return IRQ_HANDLED;
}
@@ -792,7 +944,14 @@ static irqreturn_t mxt_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
- return mxt_process_messages_until_invalid(data);
+ if (!data->object_table)
+ return IRQ_HANDLED;
+
+ if (data->T44_address) {
+ return mxt_process_messages_t44(data);
+ } else {
+ return mxt_process_messages(data);
+ }
}
static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
@@ -866,78 +1025,314 @@ static void mxt_update_crc(struct mxt_data *data, u8 cmd, u8 value)
mxt_wait_for_completion(data, &data->crc_completion, MXT_CRC_TIMEOUT);
}
-static int mxt_check_reg_init(struct mxt_data *data)
+static void mxt_calc_crc24(u32 *crc, u8 firstbyte, u8 secondbyte)
+{
+ static const unsigned int crcpoly = 0x80001B;
+ u32 result;
+ u32 data_word;
+
+ data_word = (secondbyte << 8) | firstbyte;
+ result = ((*crc << 1) ^ data_word);
+
+ if (result & 0x1000000)
+ result ^= crcpoly;
+
+ *crc = result;
+}
+
+static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off)
+{
+ u32 crc = 0;
+ u8 *ptr = base + start_off;
+ u8 *last_val = base + end_off - 1;
+
+ if (end_off < start_off)
+ return -EINVAL;
+
+ while (ptr < last_val) {
+ mxt_calc_crc24(&crc, *ptr, *(ptr + 1));
+ ptr += 2;
+ }
+
+ /* if len is odd, fill the last byte with 0 */
+ if (ptr == last_val)
+ mxt_calc_crc24(&crc, *ptr, 0);
+
+ /* Mask to 24-bit */
+ crc &= 0x00FFFFFF;
+
+ return crc;
+}
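
The checksum above is Atmel's 24-bit CRC taken over byte pairs, with an odd trailing byte padded by zero and the result masked to 24 bits. A compilable host restatement of the same arithmetic (the sample buffer is arbitrary test data):

#include <stdint.h>
#include <stdio.h>

/* Feed two bytes at a time into the 24-bit CRC (polynomial 0x80001B). */
static void crc24_step(uint32_t *crc, uint8_t b0, uint8_t b1)
{
	const uint32_t poly = 0x80001B;
	uint32_t data_word = ((uint32_t)b1 << 8) | b0;
	uint32_t result = (*crc << 1) ^ data_word;

	if (result & 0x1000000)
		result ^= poly;
	*crc = result;
}

static uint32_t crc24(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		crc24_step(&crc, buf[i], buf[i + 1]);

	if (len & 1)			/* odd length: pad the last byte with 0 */
		crc24_step(&crc, buf[len - 1], 0);

	return crc & 0x00FFFFFF;	/* the checksum is only 24 bits wide */
}

int main(void)
{
	const uint8_t sample[5] = { 0x80, 0x01, 0x10, 0xAA, 0x55 };

	printf("crc24 = 0x%06X\n", (unsigned)crc24(sample, sizeof(sample)));
	return 0;
}
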
+
+/*
+ * mxt_update_cfg - download configuration to chip
+ *
+ * Atmel Raw Config File Format
+ *
+ * The first four lines of the raw config file contain:
+ * 1) Version
+ * 2) Chip ID Information (first 7 bytes of device memory)
+ * 3) Chip Information Block 24-bit CRC Checksum
+ * 4) Chip Configuration 24-bit CRC Checksum
+ *
+ * The rest of the file consists of one line per object instance:
+ * <TYPE> <INSTANCE> <SIZE> <CONTENTS>
+ *
+ * <TYPE> - 2-byte object type as hex
+ * <INSTANCE> - 2-byte object instance number as hex
+ * <SIZE> - 2-byte object size as hex
+ * <CONTENTS> - array of <SIZE> 1-byte hex values
+ */
+static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
{
- const struct mxt_platform_data *pdata = data->pdata;
- struct mxt_object *object;
struct device *dev = &data->client->dev;
- int index = 0;
- int i, size;
+ struct mxt_info cfg_info;
+ struct mxt_object *object;
int ret;
+ int offset;
+ int data_pos;
+ int byte_offset;
+ int i;
+ int cfg_start_ofs;
+ u32 info_crc, config_crc, calculated_crc;
+ u8 *config_mem;
+ size_t config_mem_size;
+ unsigned int type, instance, size;
+ u8 val;
+ u16 reg;
- if (!pdata->config) {
- dev_dbg(dev, "No cfg data defined, skipping reg init\n");
- return 0;
+ mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1);
+
+ if (strncmp(cfg->data, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) {
+ dev_err(dev, "Unrecognised config file\n");
+ ret = -EINVAL;
+ goto release;
}
- mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1);
+ data_pos = strlen(MXT_CFG_MAGIC);
+
+ /* Load information block and check */
+ for (i = 0; i < sizeof(struct mxt_info); i++) {
+ ret = sscanf(cfg->data + data_pos, "%hhx%n",
+ (unsigned char *)&cfg_info + i,
+ &offset);
+ if (ret != 1) {
+ dev_err(dev, "Bad format\n");
+ ret = -EINVAL;
+ goto release;
+ }
- if (data->config_crc == pdata->config_crc) {
- dev_info(dev, "Config CRC 0x%06X: OK\n", data->config_crc);
- return 0;
+ data_pos += offset;
}
- dev_info(dev, "Config CRC 0x%06X: does not match 0x%06X\n",
- data->config_crc, pdata->config_crc);
+ if (cfg_info.family_id != data->info.family_id) {
+ dev_err(dev, "Family ID mismatch!\n");
+ ret = -EINVAL;
+ goto release;
+ }
- for (i = 0; i < data->info.object_num; i++) {
- object = data->object_table + i;
+ if (cfg_info.variant_id != data->info.variant_id) {
+ dev_err(dev, "Variant ID mismatch!\n");
+ ret = -EINVAL;
+ goto release;
+ }
+
+ /* Read CRCs */
+ ret = sscanf(cfg->data + data_pos, "%x%n", &info_crc, &offset);
+ if (ret != 1) {
+ dev_err(dev, "Bad format: failed to parse Info CRC\n");
+ ret = -EINVAL;
+ goto release;
+ }
+ data_pos += offset;
- if (!mxt_object_writable(object->type))
+ ret = sscanf(cfg->data + data_pos, "%x%n", &config_crc, &offset);
+ if (ret != 1) {
+ dev_err(dev, "Bad format: failed to parse Config CRC\n");
+ ret = -EINVAL;
+ goto release;
+ }
+ data_pos += offset;
+
+ /*
+ * The Info Block CRC is calculated over mxt_info and the object
+ * table. If it does not match then we are trying to load the
+ * configuration from a different chip or firmware version, so
+ * the configuration CRC is invalid anyway.
+ */
+ if (info_crc == data->info_crc) {
+ if (config_crc == 0 || data->config_crc == 0) {
+ dev_info(dev, "CRC zero, attempting to apply config\n");
+ } else if (config_crc == data->config_crc) {
+ dev_dbg(dev, "Config CRC 0x%06X: OK\n",
+ data->config_crc);
+ ret = 0;
+ goto release;
+ } else {
+ dev_info(dev, "Config CRC 0x%06X: does not match file 0x%06X\n",
+ data->config_crc, config_crc);
+ }
+ } else {
+ dev_warn(dev,
+ "Warning: Info CRC error - device=0x%06X file=0x%06X\n",
+ data->info_crc, info_crc);
+ }
+
+ /* Malloc memory to store configuration */
+ cfg_start_ofs = MXT_OBJECT_START +
+ data->info.object_num * sizeof(struct mxt_object) +
+ MXT_INFO_CHECKSUM_SIZE;
+ config_mem_size = data->mem_size - cfg_start_ofs;
+ config_mem = kzalloc(config_mem_size, GFP_KERNEL);
+ if (!config_mem) {
+ dev_err(dev, "Failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ while (data_pos < cfg->size) {
+ /* Read type, instance, length */
+ ret = sscanf(cfg->data + data_pos, "%x %x %x%n",
+ &type, &instance, &size, &offset);
+ if (ret == 0) {
+ /* EOF */
+ break;
+ } else if (ret != 3) {
+ dev_err(dev, "Bad format: failed to parse object\n");
+ ret = -EINVAL;
+ goto release_mem;
+ }
+ data_pos += offset;
+
+ object = mxt_get_object(data, type);
+ if (!object) {
+ /* Skip object */
+ for (i = 0; i < size; i++) {
+ ret = sscanf(cfg->data + data_pos, "%hhx%n",
+ &val,
+ &offset);
+ data_pos += offset;
+ }
continue;
+ }
- size = mxt_obj_size(object) * mxt_obj_instances(object);
- if (index + size > pdata->config_length) {
- dev_err(dev, "Not enough config data!\n");
- return -EINVAL;
+ if (size > mxt_obj_size(object)) {
+ /*
+ * Either we are in fallback mode due to wrong
+ * config or config from a later fw version,
+ * or the file is corrupt or hand-edited.
+ */
+ dev_warn(dev, "Discarding %zu byte(s) in T%u\n",
+ size - mxt_obj_size(object), type);
+ } else if (mxt_obj_size(object) > size) {
+ /*
+ * If firmware is upgraded, new bytes may be added to
+ * end of objects. It is generally forward compatible
+ * to zero these bytes - previous behaviour will be
+ * retained. However this does invalidate the CRC and
+ * will force fallback mode until the configuration is
+ * updated. We warn here but do nothing else - the
+ * malloc has zeroed the entire configuration.
+ */
+ dev_warn(dev, "Zeroing %zu byte(s) in T%d\n",
+ mxt_obj_size(object) - size, type);
}
- ret = __mxt_write_reg(data->client, object->start_address,
- size, &pdata->config[index]);
- if (ret)
- return ret;
- index += size;
+ if (instance >= mxt_obj_instances(object)) {
+ dev_err(dev, "Object instances exceeded!\n");
+ ret = -EINVAL;
+ goto release_mem;
+ }
+
+ reg = object->start_address + mxt_obj_size(object) * instance;
+
+ for (i = 0; i < size; i++) {
+ ret = sscanf(cfg->data + data_pos, "%hhx%n",
+ &val,
+ &offset);
+ if (ret != 1) {
+ dev_err(dev, "Bad format in T%d\n", type);
+ ret = -EINVAL;
+ goto release_mem;
+ }
+ data_pos += offset;
+
+ if (i > mxt_obj_size(object))
+ continue;
+
+ byte_offset = reg + i - cfg_start_ofs;
+
+ if ((byte_offset >= 0)
+ && (byte_offset < config_mem_size)) {
+ *(config_mem + byte_offset) = val;
+ } else {
+ dev_err(dev, "Bad object: reg:%d, T%d, ofs=%d\n",
+ reg, object->type, byte_offset);
+ ret = -EINVAL;
+ goto release_mem;
+ }
+ }
+ }
+
+ /* Calculate crc of the received configs (not the raw config file) */
+ if (data->T7_address < cfg_start_ofs) {
+ dev_err(dev, "Bad T7 address, T7addr = %x, config offset %x\n",
+ data->T7_address, cfg_start_ofs);
+ ret = 0;
+ goto release_mem;
+ }
+
+ calculated_crc = mxt_calculate_crc(config_mem,
+ data->T7_address - cfg_start_ofs,
+ config_mem_size);
+
+ if (config_crc > 0 && (config_crc != calculated_crc))
+ dev_warn(dev, "Config CRC error, calculated=%06X, file=%06X\n",
+ calculated_crc, config_crc);
+
+ /* Write configuration as blocks */
+ byte_offset = 0;
+ while (byte_offset < config_mem_size) {
+ size = config_mem_size - byte_offset;
+
+ if (size > MXT_MAX_BLOCK_WRITE)
+ size = MXT_MAX_BLOCK_WRITE;
+
+ ret = __mxt_write_reg(data->client,
+ cfg_start_ofs + byte_offset,
+ size, config_mem + byte_offset);
+ if (ret != 0) {
+ dev_err(dev, "Config write error, ret=%d\n", ret);
+ goto release_mem;
+ }
+
+ byte_offset += size;
}
mxt_update_crc(data, MXT_COMMAND_BACKUPNV, MXT_BACKUP_VALUE);
ret = mxt_soft_reset(data);
if (ret)
- return ret;
+ goto release_mem;
dev_info(dev, "Config successfully updated\n");
- return 0;
+release_mem:
+ kfree(config_mem);
+release:
+ release_firmware(cfg);
+ return ret;
}
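
Since the raw config format documented above is plain ASCII hex, the parsing done by mxt_update_cfg() can be prototyped entirely in user space. A minimal sketch under those assumptions (the magic string, the 7-byte information block and the sample record are illustrative; error handling is trimmed):

#include <stdio.h>
#include <string.h>

#define CFG_MAGIC "OBP_RAW V1\n"	/* assumed first line of the file */
#define INFO_LEN  7	/* family, variant, version, build, matrix x/y, objects */

int main(void)
{
	const char *cfg =
		"OBP_RAW V1\n"
		"81 19 10 AA 14 0E 17\n"	/* information block */
		"123456\n"			/* info block CRC */
		"ABCDEF\n"			/* config CRC */
		"0007 0000 0002 41 00\n";	/* T7, instance 0, 2 bytes */
	unsigned char info[INFO_LEN], val;
	unsigned int info_crc, cfg_crc, type, inst, size;
	int pos, off, i;

	if (strncmp(cfg, CFG_MAGIC, strlen(CFG_MAGIC)) != 0)
		return 1;
	pos = strlen(CFG_MAGIC);

	for (i = 0; i < INFO_LEN; i++) {	/* information block bytes */
		sscanf(cfg + pos, "%hhx%n", &info[i], &off);
		pos += off;
	}

	sscanf(cfg + pos, "%x%n", &info_crc, &off);  pos += off;
	sscanf(cfg + pos, "%x%n", &cfg_crc, &off);   pos += off;
	printf("info crc 0x%06X, config crc 0x%06X\n", info_crc, cfg_crc);

	/* One "<type> <instance> <size> <bytes...>" record per object */
	while (sscanf(cfg + pos, "%x %x %x%n", &type, &inst, &size, &off) == 3) {
		pos += off;
		printf("T%u instance %u, %u byte(s):", type, inst, size);
		for (i = 0; i < (int)size; i++) {
			sscanf(cfg + pos, "%hhx%n", &val, &off);
			pos += off;
			printf(" %02X", (unsigned)val);
		}
		printf("\n");
	}
	return 0;
}
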
-static int mxt_make_highchg(struct mxt_data *data)
+static int mxt_acquire_irq(struct mxt_data *data)
{
- struct device *dev = &data->client->dev;
- struct mxt_message message;
- int count = 10;
int error;
- /* Read dummy message to make high CHG pin */
- do {
- error = mxt_read_message(data, &message);
- if (error)
- return error;
- } while (message.reportid != 0xff && --count);
+ enable_irq(data->irq);
- if (!count) {
- dev_err(dev, "CHG pin isn't cleared\n");
- return -EBUSY;
- }
+ error = mxt_process_messages_until_invalid(data);
+ if (error)
+ return error;
return 0;
}
@@ -956,24 +1351,55 @@ static int mxt_get_info(struct mxt_data *data)
return 0;
}
+static void mxt_free_object_table(struct mxt_data *data)
+{
+ if (data->input_dev) {
+ input_unregister_device(data->input_dev);
+ data->input_dev = NULL;
+ }
+
+ kfree(data->object_table);
+ data->object_table = NULL;
+ kfree(data->msg_buf);
+ data->msg_buf = NULL;
+ data->T5_address = 0;
+ data->T5_msg_size = 0;
+ data->T6_reportid = 0;
+ data->T7_address = 0;
+ data->T9_reportid_min = 0;
+ data->T9_reportid_max = 0;
+ data->T19_reportid = 0;
+ data->T44_address = 0;
+ data->max_reportid = 0;
+}
+
static int mxt_get_object_table(struct mxt_data *data)
{
struct i2c_client *client = data->client;
size_t table_size;
+ struct mxt_object *object_table;
int error;
int i;
u8 reportid;
+ u16 end_address;
table_size = data->info.object_num * sizeof(struct mxt_object);
+ object_table = kzalloc(table_size, GFP_KERNEL);
+ if (!object_table) {
+ dev_err(&data->client->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
error = __mxt_read_reg(client, MXT_OBJECT_START, table_size,
- data->object_table);
- if (error)
+ object_table);
+ if (error) {
+ kfree(object_table);
return error;
+ }
/* Valid Report IDs start counting from 1 */
reportid = 1;
+ data->mem_size = 0;
for (i = 0; i < data->info.object_num; i++) {
- struct mxt_object *object = data->object_table + i;
+ struct mxt_object *object = object_table + i;
u8 min_id, max_id;
le16_to_cpus(&object->start_address);
@@ -995,31 +1421,71 @@ static int mxt_get_object_table(struct mxt_data *data)
min_id, max_id);
switch (object->type) {
+ case MXT_GEN_MESSAGE_T5:
+ if (data->info.family_id == 0x80) {
+ /*
+ * On mXT224 read and discard unused CRC byte
+ * otherwise DMA reads are misaligned
+ */
+ data->T5_msg_size = mxt_obj_size(object);
+ } else {
+ /* CRC not enabled, so skip last byte */
+ data->T5_msg_size = mxt_obj_size(object) - 1;
+ }
+ data->T5_address = object->start_address;
+ break;
case MXT_GEN_COMMAND_T6:
data->T6_reportid = min_id;
data->T6_address = object->start_address;
break;
+ case MXT_GEN_POWER_T7:
+ data->T7_address = object->start_address;
+ break;
case MXT_TOUCH_MULTI_T9:
data->T9_reportid_min = min_id;
data->T9_reportid_max = max_id;
+ data->num_touchids = object->num_report_ids
+ * mxt_obj_instances(object);
+ break;
+ case MXT_SPT_MESSAGECOUNT_T44:
+ data->T44_address = object->start_address;
break;
case MXT_SPT_GPIOPWM_T19:
data->T19_reportid = min_id;
break;
}
+
+ end_address = object->start_address
+ + mxt_obj_size(object) * mxt_obj_instances(object) - 1;
+
+ if (end_address >= data->mem_size)
+ data->mem_size = end_address + 1;
+ }
+
+ /* Store maximum reportid */
+ data->max_reportid = reportid;
+
+ /* If T44 exists, T5 position has to be directly after */
+ if (data->T44_address && (data->T5_address != data->T44_address + 1)) {
+ dev_err(&client->dev, "Invalid T44 position\n");
+ error = -EINVAL;
+ goto free_object_table;
+ }
+
+ data->msg_buf = kcalloc(data->max_reportid,
+ data->T5_msg_size, GFP_KERNEL);
+ if (!data->msg_buf) {
+ dev_err(&client->dev, "Failed to allocate message buffer\n");
+ error = -ENOMEM;
+ goto free_object_table;
}
+ data->object_table = object_table;
+
return 0;
-}
-static void mxt_free_object_table(struct mxt_data *data)
-{
- kfree(data->object_table);
- data->object_table = NULL;
- data->T6_reportid = 0;
- data->T9_reportid_min = 0;
- data->T9_reportid_max = 0;
- data->T19_reportid = 0;
+free_object_table:
+ mxt_free_object_table(data);
+ return error;
}
static int mxt_read_t9_resolution(struct mxt_data *data)
@@ -1070,55 +1536,255 @@ static int mxt_read_t9_resolution(struct mxt_data *data)
return 0;
}
+static int mxt_input_open(struct input_dev *dev);
+static void mxt_input_close(struct input_dev *dev);
+
+static int mxt_initialize_t9_input_device(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ const struct mxt_platform_data *pdata = data->pdata;
+ struct input_dev *input_dev;
+ int error;
+ unsigned int num_mt_slots;
+ unsigned int mt_flags = 0;
+ int i;
+
+ error = mxt_read_t9_resolution(data);
+ if (error)
+ dev_warn(dev, "Failed to initialize T9 resolution\n");
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ input_dev->name = "Atmel maXTouch Touchscreen";
+ input_dev->phys = data->phys;
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = dev;
+ input_dev->open = mxt_input_open;
+ input_dev->close = mxt_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ if (pdata->t19_num_keys) {
+ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
+
+ for (i = 0; i < pdata->t19_num_keys; i++)
+ if (pdata->t19_keymap[i] != KEY_RESERVED)
+ input_set_capability(input_dev, EV_KEY,
+ pdata->t19_keymap[i]);
+
+ mt_flags |= INPUT_MT_POINTER;
+
+ input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM);
+ input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_X,
+ MXT_PIXELS_PER_MM);
+ input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
+ MXT_PIXELS_PER_MM);
+
+ input_dev->name = "Atmel maXTouch Touchpad";
+ }
+
+ /* For single touch */
+ input_set_abs_params(input_dev, ABS_X,
+ 0, data->max_x, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y,
+ 0, data->max_y, 0, 0);
+ input_set_abs_params(input_dev, ABS_PRESSURE,
+ 0, 255, 0, 0);
+
+ /* For multi touch */
+ num_mt_slots = data->T9_reportid_max - data->T9_reportid_min + 1;
+ error = input_mt_init_slots(input_dev, num_mt_slots, mt_flags);
+ if (error) {
+ dev_err(dev, "Error %d initialising slots\n", error);
+ goto err_free_mem;
+ }
+
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, MXT_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ 0, data->max_x, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ 0, data->max_y, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_PRESSURE,
+ 0, 255, 0, 0);
+
+ input_set_drvdata(input_dev, data);
+
+ error = input_register_device(input_dev);
+ if (error) {
+ dev_err(dev, "Error %d registering input device\n", error);
+ goto err_free_mem;
+ }
+
+ data->input_dev = input_dev;
+
+ return 0;
+
+err_free_mem:
+ input_free_device(input_dev);
+ return error;
+}
+
+static int mxt_configure_objects(struct mxt_data *data,
+ const struct firmware *cfg);
+
+static void mxt_config_cb(const struct firmware *cfg, void *ctx)
+{
+ mxt_configure_objects(ctx, cfg);
+}
+
static int mxt_initialize(struct mxt_data *data)
{
struct i2c_client *client = data->client;
- struct mxt_info *info = &data->info;
int error;
+ bool alt_bootloader_addr = false;
+ bool retry = false;
+retry_info:
error = mxt_get_info(data);
- if (error)
- return error;
+ if (error) {
+retry_bootloader:
+ error = mxt_probe_bootloader(data, alt_bootloader_addr);
+ if (error) {
+ if (alt_bootloader_addr) {
+ /* Chip is not in appmode or bootloader mode */
+ return error;
+ }
- data->object_table = kcalloc(info->object_num,
- sizeof(struct mxt_object),
- GFP_KERNEL);
- if (!data->object_table) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- return -ENOMEM;
+ dev_info(&client->dev, "Trying alternate bootloader address\n");
+ alt_bootloader_addr = true;
+ goto retry_bootloader;
+ } else {
+ if (retry) {
+ dev_err(&client->dev, "Could not recover from bootloader mode\n");
+ /*
+ * We can reflash from this state, so do not
+ * abort init
+ */
+ data->in_bootloader = true;
+ return 0;
+ }
+
+ /* Attempt to exit bootloader into app mode */
+ mxt_send_bootloader_cmd(data, false);
+ msleep(MXT_FW_RESET_TIME);
+ retry = true;
+ goto retry_info;
+ }
}
/* Get object table information */
error = mxt_get_object_table(data);
if (error) {
dev_err(&client->dev, "Error %d reading object table\n", error);
- goto err_free_object_table;
+ return error;
}
- /* Check register init values */
- error = mxt_check_reg_init(data);
- if (error) {
- dev_err(&client->dev, "Error %d initializing configuration\n",
- error);
+ error = mxt_acquire_irq(data);
+ if (error)
goto err_free_object_table;
+
+ request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
+ &data->client->dev, GFP_KERNEL, data,
+ mxt_config_cb);
+
+ return 0;
+
+err_free_object_table:
+ mxt_free_object_table(data);
+ return error;
+}
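
The recovery logic in mxt_initialize() above tries application mode first, probes the bootloader at the normal and then the alternate I2C address, attempts one jump back to app mode, and otherwise settles into a reflash-capable bootloader state. A simulation of that flow, restated as a loop rather than the driver's gotos; all chip behaviour here is faked:

#include <stdbool.h>
#include <stdio.h>

/* Simulated chip: starts stuck in bootloader mode at the normal address. */
static bool in_bootloader = true;
static bool bl_at_alt_address = false;

static int get_info(void)
{
	return in_bootloader ? -1 : 0;	/* app mode answers only when running */
}

static int probe_bootloader(bool alt)
{
	return (in_bootloader && alt == bl_at_alt_address) ? 0 : -1;
}

static void send_exit_cmd(void)
{
	in_bootloader = false;		/* chip jumps back to the application */
}

static int initialize(void)
{
	bool alt = false, retried = false;

	for (;;) {
		if (get_info() == 0)
			return 0;		/* recovered into app mode */

		if (probe_bootloader(alt) != 0) {
			if (alt)
				return -1;	/* neither app nor bootloader */
			alt = true;		/* try the alternate address */
			continue;
		}

		if (retried)
			return 1;	/* stay in bootloader; reflash possible */

		send_exit_cmd();	/* one attempt to reach app mode */
		retried = true;
	}
}

int main(void)
{
	printf("init result: %d\n", initialize());	/* 0 for this simulation */
	return 0;
}
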
+
+static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
+{
+ struct device *dev = &data->client->dev;
+ int error;
+ struct t7_config *new_config;
+ struct t7_config deepsleep = { .active = 0, .idle = 0 };
+
+ if (sleep == MXT_POWER_CFG_DEEPSLEEP)
+ new_config = &deepsleep;
+ else
+ new_config = &data->t7_cfg;
+
+ error = __mxt_write_reg(data->client, data->T7_address,
+ sizeof(data->t7_cfg), new_config);
+ if (error)
+ return error;
+
+ dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
+ new_config->active, new_config->idle);
+
+ return 0;
+}
+
+static int mxt_init_t7_power_cfg(struct mxt_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int error;
+ bool retry = false;
+
+recheck:
+ error = __mxt_read_reg(data->client, data->T7_address,
+ sizeof(data->t7_cfg), &data->t7_cfg);
+ if (error)
+ return error;
+
+ if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
+ if (!retry) {
+ dev_dbg(dev, "T7 cfg zero, resetting\n");
+ mxt_soft_reset(data);
+ retry = true;
+ goto recheck;
+ } else {
+ dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
+ data->t7_cfg.active = 20;
+ data->t7_cfg.idle = 100;
+ return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+ }
}
- error = mxt_read_t9_resolution(data);
+ dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
+ data->t7_cfg.active, data->t7_cfg.idle);
+ return 0;
+}
+
+static int mxt_configure_objects(struct mxt_data *data,
+ const struct firmware *cfg)
+{
+ struct device *dev = &data->client->dev;
+ struct mxt_info *info = &data->info;
+ int error;
+
+ if (cfg) {
+ error = mxt_update_cfg(data, cfg);
+ if (error)
+ dev_warn(dev, "Error %d updating config\n", error);
+ }
+
+ error = mxt_init_t7_power_cfg(data);
if (error) {
- dev_err(&client->dev, "Failed to initialize T9 resolution\n");
- goto err_free_object_table;
+ dev_err(dev, "Failed to initialize power cfg\n");
+ return error;
}
- dev_info(&client->dev,
+ error = mxt_initialize_t9_input_device(data);
+ if (error)
+ return error;
+
+ dev_info(dev,
"Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
info->family_id, info->variant_id, info->version >> 4,
info->version & 0xf, info->build, info->object_num);
return 0;
-
-err_free_object_table:
- mxt_free_object_table(data);
- return error;
}
/* Firmware Version is returned as Major.Minor.Build */
@@ -1246,30 +1912,45 @@ static int mxt_load_fw(struct device *dev, const char *fn)
if (ret)
goto release_firmware;
- ret = mxt_lookup_bootloader_address(data);
- if (ret)
- goto release_firmware;
+ if (!data->in_bootloader) {
+ /* Change to the bootloader mode */
+ data->in_bootloader = true;
- /* Change to the bootloader mode */
- data->in_bootloader = true;
+ ret = mxt_t6_command(data, MXT_COMMAND_RESET,
+ MXT_BOOT_VALUE, false);
+ if (ret)
+ goto release_firmware;
- ret = mxt_t6_command(data, MXT_COMMAND_RESET, MXT_BOOT_VALUE, false);
- if (ret)
- goto release_firmware;
+ msleep(MXT_RESET_TIME);
- msleep(MXT_RESET_TIME);
+ /* Do not need to scan since we know family ID */
+ ret = mxt_lookup_bootloader_address(data, 0);
+ if (ret)
+ goto release_firmware;
+ } else {
+ enable_irq(data->irq);
+ }
+ mxt_free_object_table(data);
reinit_completion(&data->bl_completion);
- ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD);
- if (ret)
- goto disable_irq;
+ ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false);
+ if (ret) {
+ /* Bootloader may still be unlocked from previous attempt */
+ ret = mxt_check_bootloader(data, MXT_WAITING_FRAME_DATA, false);
+ if (ret)
+ goto disable_irq;
+ } else {
+ dev_info(dev, "Unlocking bootloader\n");
- /* Unlock bootloader */
- mxt_unlock_bootloader(data);
+ /* Unlock bootloader */
+ ret = mxt_send_bootloader_cmd(data, true);
+ if (ret)
+ goto disable_irq;
+ }
while (pos < fw->size) {
- ret = mxt_check_bootloader(data, MXT_WAITING_FRAME_DATA);
+ ret = mxt_check_bootloader(data, MXT_WAITING_FRAME_DATA, true);
if (ret)
goto disable_irq;
@@ -1283,7 +1964,7 @@ static int mxt_load_fw(struct device *dev, const char *fn)
if (ret)
goto disable_irq;
- ret = mxt_check_bootloader(data, MXT_FRAME_CRC_PASS);
+ ret = mxt_check_bootloader(data, MXT_FRAME_CRC_PASS, true);
if (ret) {
retry++;
@@ -1343,13 +2024,7 @@ static ssize_t mxt_update_fw_store(struct device *dev,
} else {
dev_info(dev, "The firmware update succeeded\n");
- mxt_free_object_table(data);
-
- mxt_initialize(data);
-
- enable_irq(data->irq);
-
- error = mxt_make_highchg(data);
+ error = mxt_initialize(data);
if (error)
return error;
}
@@ -1376,16 +2051,15 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
- /* Touch enable */
- mxt_write_object(data,
- MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
+ mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
+
+ /* Recalibrate since chip has been in deep sleep */
+ mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
}
static void mxt_stop(struct mxt_data *data)
{
- /* Touch disable */
- mxt_write_object(data,
- MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
+ mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
}
static int mxt_input_open(struct input_dev *dev)
@@ -1404,138 +2078,112 @@ static void mxt_input_close(struct input_dev *dev)
mxt_stop(data);
}
-static int mxt_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+#ifdef CONFIG_OF
+static struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
+{
+ struct mxt_platform_data *pdata;
+ u32 *keymap;
+ u32 keycode;
+ int proplen, i, ret;
+
+ if (!client->dev.of_node)
+ return ERR_PTR(-ENODEV);
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_find_property(client->dev.of_node, "linux,gpio-keymap",
+ &proplen)) {
+ pdata->t19_num_keys = proplen / sizeof(u32);
+
+ keymap = devm_kzalloc(&client->dev,
+ pdata->t19_num_keys * sizeof(keymap[0]),
+ GFP_KERNEL);
+ if (!keymap)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < pdata->t19_num_keys; i++) {
+ ret = of_property_read_u32_index(client->dev.of_node,
+ "linux,gpio-keymap", i, &keycode);
+ if (ret)
+ keycode = KEY_RESERVED;
+
+ keymap[i] = keycode;
+ }
+
+ pdata->t19_keymap = keymap;
+ }
+
+ return pdata;
+}
+#else
+static struct mxt_platform_data *mxt_parse_dt(struct i2c_client *client)
+{
+ dev_dbg(&client->dev, "No platform data specified\n");
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
+static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
- const struct mxt_platform_data *pdata = dev_get_platdata(&client->dev);
struct mxt_data *data;
- struct input_dev *input_dev;
+ const struct mxt_platform_data *pdata;
int error;
- unsigned int num_mt_slots;
- unsigned int mt_flags = 0;
- int i;
- if (!pdata)
- return -EINVAL;
+ pdata = dev_get_platdata(&client->dev);
+ if (!pdata) {
+ pdata = mxt_parse_dt(client);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ }
data = kzalloc(sizeof(struct mxt_data), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!data || !input_dev) {
+ if (!data) {
dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ return -ENOMEM;
}
- input_dev->name = "Atmel maXTouch Touchscreen";
snprintf(data->phys, sizeof(data->phys), "i2c-%u-%04x/input0",
client->adapter->nr, client->addr);
- input_dev->phys = data->phys;
-
- input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
- input_dev->open = mxt_input_open;
- input_dev->close = mxt_input_close;
-
data->client = client;
- data->input_dev = input_dev;
data->pdata = pdata;
data->irq = client->irq;
+ i2c_set_clientdata(client, data);
init_completion(&data->bl_completion);
init_completion(&data->reset_completion);
init_completion(&data->crc_completion);
- error = mxt_initialize(data);
- if (error)
- goto err_free_mem;
-
- __set_bit(EV_ABS, input_dev->evbit);
- __set_bit(EV_KEY, input_dev->evbit);
- __set_bit(BTN_TOUCH, input_dev->keybit);
-
- if (pdata->t19_num_keys) {
- __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
-
- for (i = 0; i < pdata->t19_num_keys; i++)
- if (pdata->t19_keymap[i] != KEY_RESERVED)
- input_set_capability(input_dev, EV_KEY,
- pdata->t19_keymap[i]);
-
- mt_flags |= INPUT_MT_POINTER;
-
- input_abs_set_res(input_dev, ABS_X, MXT_PIXELS_PER_MM);
- input_abs_set_res(input_dev, ABS_Y, MXT_PIXELS_PER_MM);
- input_abs_set_res(input_dev, ABS_MT_POSITION_X,
- MXT_PIXELS_PER_MM);
- input_abs_set_res(input_dev, ABS_MT_POSITION_Y,
- MXT_PIXELS_PER_MM);
-
- input_dev->name = "Atmel maXTouch Touchpad";
- }
-
- /* For single touch */
- input_set_abs_params(input_dev, ABS_X,
- 0, data->max_x, 0, 0);
- input_set_abs_params(input_dev, ABS_Y,
- 0, data->max_y, 0, 0);
- input_set_abs_params(input_dev, ABS_PRESSURE,
- 0, 255, 0, 0);
-
- /* For multi touch */
- num_mt_slots = data->T9_reportid_max - data->T9_reportid_min + 1;
- error = input_mt_init_slots(input_dev, num_mt_slots, mt_flags);
- if (error)
- goto err_free_object;
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
- 0, MXT_MAX_AREA, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, data->max_x, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, data->max_y, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_PRESSURE,
- 0, 255, 0, 0);
-
- input_set_drvdata(input_dev, data);
- i2c_set_clientdata(client, data);
-
error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
pdata->irqflags | IRQF_ONESHOT,
client->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_object;
+ goto err_free_mem;
}
- error = mxt_make_highchg(data);
- if (error)
- goto err_free_irq;
+ disable_irq(client->irq);
- error = input_register_device(input_dev);
- if (error) {
- dev_err(&client->dev, "Error %d registering input device\n",
- error);
+ error = mxt_initialize(data);
+ if (error)
goto err_free_irq;
- }
error = sysfs_create_group(&client->dev.kobj, &mxt_attr_group);
if (error) {
dev_err(&client->dev, "Failure %d creating sysfs group\n",
error);
- goto err_unregister_device;
+ goto err_free_object;
}
return 0;
-err_unregister_device:
- input_unregister_device(input_dev);
- input_dev = NULL;
+err_free_object:
+ mxt_free_object_table(data);
err_free_irq:
free_irq(client->irq, data);
-err_free_object:
- kfree(data->object_table);
err_free_mem:
- input_free_device(input_dev);
kfree(data);
return error;
}
@@ -1547,7 +2195,7 @@ static int mxt_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &mxt_attr_group);
free_irq(data->irq, data);
input_unregister_device(data->input_dev);
- kfree(data->object_table);
+ mxt_free_object_table(data);
kfree(data);
return 0;
@@ -1576,8 +2224,6 @@ static int mxt_resume(struct device *dev)
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;
- mxt_soft_reset(data);
-
mutex_lock(&input_dev->mutex);
if (input_dev->users)
@@ -1591,6 +2237,12 @@ static int mxt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
+static const struct of_device_id mxt_of_match[] = {
+ { .compatible = "atmel,maxtouch", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mxt_of_match);
+
static const struct i2c_device_id mxt_id[] = {
{ "qt602240_ts", 0 },
{ "atmel_mxt_ts", 0 },
@@ -1604,6 +2256,7 @@ static struct i2c_driver mxt_driver = {
.driver = {
.name = "atmel_mxt_ts",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mxt_of_match),
.pm = &mxt_pm_ops,
},
.probe = mxt_probe,
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index d4f33992ad8c..5a6d50c004d7 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -733,8 +733,7 @@ edt_ft5x06_ts_prepare_debugfs(struct edt_ft5x06_ts_data *tsdata,
static void
edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
- if (tsdata->debug_dir)
- debugfs_remove_recursive(tsdata->debug_dir);
+ debugfs_remove_recursive(tsdata->debug_dir);
kfree(tsdata->raw_buffer);
}
diff --git a/drivers/input/touchscreen/ipaq-micro-ts.c b/drivers/input/touchscreen/ipaq-micro-ts.c
new file mode 100644
index 000000000000..62c8976e616f
--- /dev/null
+++ b/drivers/input/touchscreen/ipaq-micro-ts.c
@@ -0,0 +1,166 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * h3600 atmel micro companion support, touchscreen subdevice
+ * Author : Alessandro Gardich <gremlin@gremlin.it>
+ * Author : Dmitry Artamonow <mad_soft@inbox.ru>
+ * Author : Linus Walleij <linus.walleij@linaro.org>
+ *
+ */
+
+#include <asm/byteorder.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mfd/ipaq-micro.h>
+
+struct touchscreen_data {
+ struct input_dev *input;
+ struct ipaq_micro *micro;
+};
+
+static void micro_ts_receive(void *data, int len, unsigned char *msg)
+{
+ struct touchscreen_data *ts = data;
+
+ if (len == 4) {
+ input_report_abs(ts->input, ABS_X,
+ be16_to_cpup((__be16 *) &msg[2]));
+ input_report_abs(ts->input, ABS_Y,
+ be16_to_cpup((__be16 *) &msg[0]));
+ input_report_key(ts->input, BTN_TOUCH, 1);
+ input_sync(ts->input);
+ } else if (len == 0) {
+ input_report_abs(ts->input, ABS_X, 0);
+ input_report_abs(ts->input, ABS_Y, 0);
+ input_report_key(ts->input, BTN_TOUCH, 0);
+ input_sync(ts->input);
+ }
+}
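
micro_ts_receive() above treats the 4-byte packet as two big-endian 16-bit words, Y first, and an empty packet as pen-up. A quick host-side sketch of the decode (the sample bytes are made up):

#include <stdint.h>
#include <stdio.h>

/* Bytes 0-1 = Y, bytes 2-3 = X, both big-endian; len 0 means "pen up". */
static void decode_packet(const uint8_t *msg, int len)
{
	if (len == 4) {
		unsigned y = ((unsigned)msg[0] << 8) | msg[1];
		unsigned x = ((unsigned)msg[2] << 8) | msg[3];

		printf("touch at x=%u y=%u\n", x, y);
	} else if (len == 0) {
		printf("pen up\n");
	}
}

int main(void)
{
	const uint8_t pkt[4] = { 0x01, 0x40, 0x02, 0x80 };

	decode_packet(pkt, 4);	/* prints x=640 y=320 */
	decode_packet(pkt, 0);	/* prints pen up */
	return 0;
}
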
+
+static void micro_ts_toggle_receive(struct touchscreen_data *ts, bool enable)
+{
+ struct ipaq_micro *micro = ts->micro;
+
+ spin_lock_irq(&micro->lock);
+
+ if (enable) {
+ micro->ts = micro_ts_receive;
+ micro->ts_data = ts;
+ } else {
+ micro->ts = NULL;
+ micro->ts_data = NULL;
+ }
+
+ spin_unlock_irq(&ts->micro->lock);
+}
+
+static int micro_ts_open(struct input_dev *input)
+{
+ struct touchscreen_data *ts = input_get_drvdata(input);
+
+ micro_ts_toggle_receive(ts, true);
+
+ return 0;
+}
+
+static void micro_ts_close(struct input_dev *input)
+{
+ struct touchscreen_data *ts = input_get_drvdata(input);
+
+ micro_ts_toggle_receive(ts, false);
+}
+
+static int micro_ts_probe(struct platform_device *pdev)
+{
+ struct ipaq_micro *micro = dev_get_drvdata(pdev->dev.parent);
+ struct touchscreen_data *ts;
+ int error;
+
+ ts = devm_kzalloc(&pdev->dev, sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->micro = micro;
+
+ ts->input = devm_input_allocate_device(&pdev->dev);
+ if (!ts->input) {
+ dev_err(&pdev->dev, "failed to allocate input device\n");
+ return -ENOMEM;
+ }
+
+ ts->input->name = "ipaq micro ts";
+ ts->input->open = micro_ts_open;
+ ts->input->close = micro_ts_close;
+
+ input_set_drvdata(ts->input, ts);
+
+ input_set_capability(ts->input, EV_KEY, BTN_TOUCH);
+ input_set_capability(ts->input, EV_ABS, ABS_X);
+ input_set_capability(ts->input, EV_ABS, ABS_Y);
+ input_set_abs_params(ts->input, ABS_X, 0, 1023, 0, 0);
+ input_set_abs_params(ts->input, ABS_Y, 0, 1023, 0, 0);
+
+ error = input_register_device(ts->input);
+ if (error) {
+ dev_err(&pdev->dev, "error registering touch input\n");
+ return error;
+ }
+
+ platform_set_drvdata(pdev, ts);
+
+ dev_info(&pdev->dev, "iPAQ micro touchscreen\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int micro_ts_suspend(struct device *dev)
+{
+ struct touchscreen_data *ts = dev_get_drvdata(dev);
+
+ micro_ts_toggle_receive(ts, false);
+
+ return 0;
+}
+
+static int micro_ts_resume(struct device *dev)
+{
+ struct touchscreen_data *ts = dev_get_drvdata(dev);
+ struct input_dev *input = ts->input;
+
+ mutex_lock(&input->mutex);
+
+ if (input->users)
+ micro_ts_toggle_receive(ts, true);
+
+ mutex_unlock(&input->mutex);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops micro_ts_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(micro_ts_suspend, micro_ts_resume)
+};
+
+static struct platform_driver micro_ts_device_driver = {
+ .driver = {
+ .name = "ipaq-micro-ts",
+ .pm = &micro_ts_dev_pm_ops,
+ },
+ .probe = micro_ts_probe,
+};
+module_platform_driver(micro_ts_device_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("driver for iPAQ Atmel micro touchscreen");
+MODULE_ALIAS("platform:ipaq-micro-ts");
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 7324c5c0fb86..651ec71a5c68 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -36,22 +36,21 @@ struct jornada_ts {
static void jornada720_ts_collect_data(struct jornada_ts *jornada_ts)
{
+ /* 3 low word X samples */
+ jornada_ts->x_data[0] = jornada_ssp_byte(TXDUMMY);
+ jornada_ts->x_data[1] = jornada_ssp_byte(TXDUMMY);
+ jornada_ts->x_data[2] = jornada_ssp_byte(TXDUMMY);
- /* 3 low word X samples */
- jornada_ts->x_data[0] = jornada_ssp_byte(TXDUMMY);
- jornada_ts->x_data[1] = jornada_ssp_byte(TXDUMMY);
- jornada_ts->x_data[2] = jornada_ssp_byte(TXDUMMY);
+ /* 3 low word Y samples */
+ jornada_ts->y_data[0] = jornada_ssp_byte(TXDUMMY);
+ jornada_ts->y_data[1] = jornada_ssp_byte(TXDUMMY);
+ jornada_ts->y_data[2] = jornada_ssp_byte(TXDUMMY);
- /* 3 low word Y samples */
- jornada_ts->y_data[0] = jornada_ssp_byte(TXDUMMY);
- jornada_ts->y_data[1] = jornada_ssp_byte(TXDUMMY);
- jornada_ts->y_data[2] = jornada_ssp_byte(TXDUMMY);
+ /* combined x samples bits */
+ jornada_ts->x_data[3] = jornada_ssp_byte(TXDUMMY);
- /* combined x samples bits */
- jornada_ts->x_data[3] = jornada_ssp_byte(TXDUMMY);
-
- /* combined y samples bits */
- jornada_ts->y_data[3] = jornada_ssp_byte(TXDUMMY);
+ /* combined y samples bits */
+ jornada_ts->y_data[3] = jornada_ssp_byte(TXDUMMY);
}
static int jornada720_ts_average(int coords[4])
@@ -104,13 +103,13 @@ static int jornada720_ts_probe(struct platform_device *pdev)
struct input_dev *input_dev;
int error;
- jornada_ts = kzalloc(sizeof(struct jornada_ts), GFP_KERNEL);
- input_dev = input_allocate_device();
+ jornada_ts = devm_kzalloc(&pdev->dev, sizeof(*jornada_ts), GFP_KERNEL);
+ if (!jornada_ts)
+ return -ENOMEM;
- if (!jornada_ts || !input_dev) {
- error = -ENOMEM;
- goto fail1;
- }
+ input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!input_dev)
+ return -ENOMEM;
platform_set_drvdata(pdev, jornada_ts);
@@ -126,36 +125,18 @@ static int jornada720_ts_probe(struct platform_device *pdev)
input_set_abs_params(input_dev, ABS_X, 270, 3900, 0, 0);
input_set_abs_params(input_dev, ABS_Y, 180, 3700, 0, 0);
- error = request_irq(IRQ_GPIO9,
- jornada720_ts_interrupt,
- IRQF_TRIGGER_RISING,
- "HP7XX Touchscreen driver", pdev);
+ error = devm_request_irq(&pdev->dev, IRQ_GPIO9,
+ jornada720_ts_interrupt,
+ IRQF_TRIGGER_RISING,
+ "HP7XX Touchscreen driver", pdev);
if (error) {
- printk(KERN_INFO "HP7XX TS : Unable to acquire irq!\n");
- goto fail1;
+ dev_err(&pdev->dev, "HP7XX TS : Unable to acquire irq!\n");
+ return error;
}
error = input_register_device(jornada_ts->dev);
if (error)
- goto fail2;
-
- return 0;
-
- fail2:
- free_irq(IRQ_GPIO9, pdev);
- fail1:
- input_free_device(input_dev);
- kfree(jornada_ts);
- return error;
-}
-
-static int jornada720_ts_remove(struct platform_device *pdev)
-{
- struct jornada_ts *jornada_ts = platform_get_drvdata(pdev);
-
- free_irq(IRQ_GPIO9, pdev);
- input_unregister_device(jornada_ts->dev);
- kfree(jornada_ts);
+ return error;
return 0;
}
@@ -165,7 +146,6 @@ MODULE_ALIAS("platform:jornada_ts");
static struct platform_driver jornada720_ts_driver = {
.probe = jornada720_ts_probe,
- .remove = jornada720_ts_remove,
.driver = {
.name = "jornada_ts",
.owner = THIS_MODULE,
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index 00510a9836b3..8b47e1fecb25 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -248,8 +248,7 @@ static int mcs5000_ts_probe(struct i2c_client *client,
return 0;
}
-#ifdef CONFIG_PM
-static int mcs5000_ts_suspend(struct device *dev)
+static int __maybe_unused mcs5000_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -259,7 +258,7 @@ static int mcs5000_ts_suspend(struct device *dev)
return 0;
}
-static int mcs5000_ts_resume(struct device *dev)
+static int __maybe_unused mcs5000_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct mcs5000_ts_data *data = i2c_get_clientdata(client);
@@ -269,7 +268,6 @@ static int mcs5000_ts_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, mcs5000_ts_suspend, mcs5000_ts_resume);
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 19c6c0fdc94b..fc49c75317d1 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -23,22 +23,51 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
#include <linux/input/pixcir_ts.h>
#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+
+#define PIXCIR_MAX_SLOTS 5 /* Max fingers supported by driver */
struct pixcir_i2c_ts_data {
struct i2c_client *client;
struct input_dev *input;
- const struct pixcir_ts_platform_data *chip;
+ const struct pixcir_ts_platform_data *pdata;
bool running;
+ int max_fingers; /* Max fingers supported in this instance */
+};
+
+struct pixcir_touch {
+ int x;
+ int y;
+ int id;
+};
+
+struct pixcir_report_data {
+ int num_touches;
+ struct pixcir_touch touches[PIXCIR_MAX_SLOTS];
};
-static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data)
+static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
+ struct pixcir_report_data *report)
{
- struct pixcir_i2c_ts_data *tsdata = data;
- u8 rdbuf[10], wrbuf[1] = { 0 };
+ u8 rdbuf[2 + PIXCIR_MAX_SLOTS * 5];
+ u8 wrbuf[1] = { 0 };
+ u8 *bufptr;
u8 touch;
- int ret;
+ int ret, i;
+ int readsize;
+ const struct pixcir_i2c_chip_data *chip = &tsdata->pdata->chip;
+
+ memset(report, 0, sizeof(struct pixcir_report_data));
+
+ i = chip->has_hw_ids ? 1 : 0;
+ readsize = 2 + tsdata->max_fingers * (4 + i);
+ if (readsize > sizeof(rdbuf))
+ readsize = sizeof(rdbuf);
ret = i2c_master_send(tsdata->client, wrbuf, sizeof(wrbuf));
if (ret != sizeof(wrbuf)) {
@@ -48,7 +77,7 @@ static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data)
return;
}
- ret = i2c_master_recv(tsdata->client, rdbuf, sizeof(rdbuf));
+ ret = i2c_master_recv(tsdata->client, rdbuf, readsize);
if (ret != sizeof(rdbuf)) {
dev_err(&tsdata->client->dev,
"%s: i2c_master_recv failed(), ret=%d\n",
@@ -56,45 +85,103 @@ static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data)
return;
}
- touch = rdbuf[0];
- if (touch) {
- u16 posx1 = (rdbuf[3] << 8) | rdbuf[2];
- u16 posy1 = (rdbuf[5] << 8) | rdbuf[4];
- u16 posx2 = (rdbuf[7] << 8) | rdbuf[6];
- u16 posy2 = (rdbuf[9] << 8) | rdbuf[8];
-
- input_report_key(tsdata->input, BTN_TOUCH, 1);
- input_report_abs(tsdata->input, ABS_X, posx1);
- input_report_abs(tsdata->input, ABS_Y, posy1);
-
- input_report_abs(tsdata->input, ABS_MT_POSITION_X, posx1);
- input_report_abs(tsdata->input, ABS_MT_POSITION_Y, posy1);
- input_mt_sync(tsdata->input);
-
- if (touch == 2) {
- input_report_abs(tsdata->input,
- ABS_MT_POSITION_X, posx2);
- input_report_abs(tsdata->input,
- ABS_MT_POSITION_Y, posy2);
- input_mt_sync(tsdata->input);
+ touch = rdbuf[0] & 0x7;
+ if (touch > tsdata->max_fingers)
+ touch = tsdata->max_fingers;
+
+ report->num_touches = touch;
+ bufptr = &rdbuf[2];
+
+ for (i = 0; i < touch; i++) {
+ report->touches[i].x = (bufptr[1] << 8) | bufptr[0];
+ report->touches[i].y = (bufptr[3] << 8) | bufptr[2];
+
+ if (chip->has_hw_ids) {
+ report->touches[i].id = bufptr[4];
+ bufptr = bufptr + 5;
+ } else {
+ bufptr = bufptr + 4;
+ }
+ }
+}
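
pixcir_ts_parse() above expects a two-byte header followed by one record per finger: X and Y as little-endian 16-bit values, plus a trailing hardware tracking ID byte on chips that support it. A stand-alone sketch of that layout (the buffer contents are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_SLOTS 5

struct touch { int x, y, id; };

/* rdbuf[0] bits 2:0 = finger count; records start at rdbuf[2] and are
 * 4 bytes each, or 5 when the chip reports hardware tracking IDs. */
static int parse_report(const uint8_t *rdbuf, int has_hw_ids,
			struct touch *out)
{
	int n = rdbuf[0] & 0x7;
	const uint8_t *p = &rdbuf[2];
	int i;

	if (n > MAX_SLOTS)
		n = MAX_SLOTS;

	for (i = 0; i < n; i++) {
		out[i].x = (p[1] << 8) | p[0];	/* little-endian X */
		out[i].y = (p[3] << 8) | p[2];	/* little-endian Y */
		out[i].id = has_hw_ids ? p[4] : -1;
		p += has_hw_ids ? 5 : 4;
	}
	return n;
}

int main(void)
{
	/* count = 2, reserved byte, then two 5-byte records with IDs 7 and 9 */
	const uint8_t rdbuf[12] = {
		0x02, 0x00,
		0x10, 0x01, 0x20, 0x02, 0x07,	/* x=0x110, y=0x220, id=7 */
		0x30, 0x03, 0x40, 0x04, 0x09,	/* x=0x330, y=0x440, id=9 */
	};
	struct touch t[MAX_SLOTS];
	int i, n = parse_report(rdbuf, 1, t);

	for (i = 0; i < n; i++)
		printf("finger %d: x=%d y=%d id=%d\n",
		       i, t[i].x, t[i].y, t[i].id);
	return 0;
}
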
+
+static void pixcir_ts_report(struct pixcir_i2c_ts_data *ts,
+ struct pixcir_report_data *report)
+{
+ struct input_mt_pos pos[PIXCIR_MAX_SLOTS];
+ int slots[PIXCIR_MAX_SLOTS];
+ struct pixcir_touch *touch;
+ int n, i, slot;
+ struct device *dev = &ts->client->dev;
+ const struct pixcir_i2c_chip_data *chip = &ts->pdata->chip;
+
+ n = report->num_touches;
+ if (n > PIXCIR_MAX_SLOTS)
+ n = PIXCIR_MAX_SLOTS;
+
+ if (!chip->has_hw_ids) {
+ for (i = 0; i < n; i++) {
+ touch = &report->touches[i];
+ pos[i].x = touch->x;
+ pos[i].y = touch->y;
+ }
+
+ input_mt_assign_slots(ts->input, slots, pos, n);
+ }
+
+ for (i = 0; i < n; i++) {
+ touch = &report->touches[i];
+
+ if (chip->has_hw_ids) {
+ slot = input_mt_get_slot_by_key(ts->input, touch->id);
+ if (slot < 0) {
+ dev_dbg(dev, "no free slot for id 0x%x\n",
+ touch->id);
+ continue;
+ }
+ } else {
+ slot = slots[i];
}
- } else {
- input_report_key(tsdata->input, BTN_TOUCH, 0);
+
+ input_mt_slot(ts->input, slot);
+ input_mt_report_slot_state(ts->input,
+ MT_TOOL_FINGER, true);
+
+ input_event(ts->input, EV_ABS, ABS_MT_POSITION_X, touch->x);
+ input_event(ts->input, EV_ABS, ABS_MT_POSITION_Y, touch->y);
+
+ dev_dbg(dev, "%d: slot %d, x %d, y %d\n",
+ i, slot, touch->x, touch->y);
}
- input_sync(tsdata->input);
+ input_mt_sync_frame(ts->input);
+ input_sync(ts->input);
}
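
When hardware tracking IDs are available, the report path above asks for an MT slot keyed by the chip's ID; without them it falls back to position-based assignment via input_mt_assign_slots(). A simplified stand-in for the keyed lookup, only to show the bookkeeping, not the real input_mt_get_slot_by_key() implementation:

#include <stdio.h>

#define NUM_SLOTS 5

/* One entry per MT slot: the hardware ID currently bound to it (-1 = free). */
static int slot_key[NUM_SLOTS] = { -1, -1, -1, -1, -1 };

/* Return the slot already bound to 'key', or bind a free one; -1 if full. */
static int get_slot_by_key(int key)
{
	int i;

	for (i = 0; i < NUM_SLOTS; i++)
		if (slot_key[i] == key)
			return i;
	for (i = 0; i < NUM_SLOTS; i++)
		if (slot_key[i] == -1) {
			slot_key[i] = key;
			return i;
		}
	return -1;
}

static void release_slot(int slot)
{
	slot_key[slot] = -1;		/* finger lifted: slot can be reused */
}

int main(void)
{
	int a = get_slot_by_key(7);	/* new contact -> slot 0 */
	int b = get_slot_by_key(9);	/* new contact -> slot 1 */

	printf("id 7 -> slot %d, id 9 -> slot %d\n", a, b);
	printf("id 7 again -> slot %d\n", get_slot_by_key(7));	/* still 0 */
	release_slot(a);
	printf("id 11 -> slot %d\n", get_slot_by_key(11));	/* reuses slot 0 */
	return 0;
}
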
static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
{
struct pixcir_i2c_ts_data *tsdata = dev_id;
- const struct pixcir_ts_platform_data *pdata = tsdata->chip;
+ const struct pixcir_ts_platform_data *pdata = tsdata->pdata;
+ struct pixcir_report_data report;
while (tsdata->running) {
- pixcir_ts_poscheck(tsdata);
-
- if (gpio_get_value(pdata->gpio_attb))
+ /* parse packet */
+ pixcir_ts_parse(tsdata, &report);
+
+ /* report it */
+ pixcir_ts_report(tsdata, &report);
+
+ if (gpio_get_value(pdata->gpio_attb)) {
+ if (report.num_touches) {
+ /*
+ * Last report with no finger up?
+ * Do it now then.
+ */
+ input_mt_sync_frame(tsdata->input);
+ input_sync(tsdata->input);
+ }
break;
+ }
msleep(20);
}
@@ -323,16 +410,69 @@ unlock:
static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
+#ifdef CONFIG_OF
+static const struct of_device_id pixcir_of_match[];
+
+static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+{
+ struct pixcir_ts_platform_data *pdata;
+ struct device_node *np = dev->of_node;
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(pixcir_of_match), dev);
+ if (!match)
+ return ERR_PTR(-EINVAL);
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->chip = *(const struct pixcir_i2c_chip_data *)match->data;
+
+ pdata->gpio_attb = of_get_named_gpio(np, "attb-gpio", 0);
+ /* gpio_attb validity is checked in probe */
+
+ if (of_property_read_u32(np, "touchscreen-size-x", &pdata->x_max)) {
+ dev_err(dev, "Failed to get touchscreen-size-x property\n");
+ return ERR_PTR(-EINVAL);
+ }
+ pdata->x_max -= 1;
+
+ if (of_property_read_u32(np, "touchscreen-size-y", &pdata->y_max)) {
+ dev_err(dev, "Failed to get touchscreen-size-y property\n");
+ return ERR_PTR(-EINVAL);
+ }
+ pdata->y_max -= 1;
+
+ dev_dbg(dev, "%s: x %d, y %d, gpio %d\n", __func__,
+ pdata->x_max + 1, pdata->y_max + 1, pdata->gpio_attb);
+
+ return pdata;
+}
+#else
+static struct pixcir_ts_platform_data *pixcir_parse_dt(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif
+
static int pixcir_i2c_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const struct pixcir_ts_platform_data *pdata =
dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
struct pixcir_i2c_ts_data *tsdata;
struct input_dev *input;
int error;
+ if (np && !pdata) {
+ pdata = pixcir_parse_dt(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ }
+
if (!pdata) {
dev_err(&client->dev, "platform data not defined\n");
return -EINVAL;
@@ -343,6 +483,11 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
return -EINVAL;
}
+ if (!pdata->chip.max_fingers) {
+ dev_err(dev, "Invalid max_fingers in pdata\n");
+ return -EINVAL;
+ }
+
tsdata = devm_kzalloc(dev, sizeof(*tsdata), GFP_KERNEL);
if (!tsdata)
return -ENOMEM;
@@ -355,7 +500,7 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
tsdata->client = client;
tsdata->input = input;
- tsdata->chip = pdata;
+ tsdata->pdata = pdata;
input->name = client->name;
input->id.bustype = BUS_I2C;
@@ -371,6 +516,20 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client,
input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+ tsdata->max_fingers = tsdata->pdata->chip.max_fingers;
+ if (tsdata->max_fingers > PIXCIR_MAX_SLOTS) {
+ tsdata->max_fingers = PIXCIR_MAX_SLOTS;
+ dev_info(dev, "Limiting maximum fingers to %d\n",
+ tsdata->max_fingers);
+ }
+
+ error = input_mt_init_slots(input, tsdata->max_fingers,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error) {
+ dev_err(dev, "Error initializing Multi-Touch slots\n");
+ return error;
+ }
+
input_set_drvdata(input, tsdata);
error = devm_gpio_request_one(dev, pdata->gpio_attb,
@@ -419,15 +578,36 @@ static int pixcir_i2c_ts_remove(struct i2c_client *client)
static const struct i2c_device_id pixcir_i2c_ts_id[] = {
{ "pixcir_ts", 0 },
+ { "pixcir_tangoc", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id);
+#ifdef CONFIG_OF
+static const struct pixcir_i2c_chip_data pixcir_ts_data = {
+ .max_fingers = 2,
+ /* no hw id support */
+};
+
+static const struct pixcir_i2c_chip_data pixcir_tangoc_data = {
+ .max_fingers = 5,
+ .has_hw_ids = true,
+};
+
+static const struct of_device_id pixcir_of_match[] = {
+ { .compatible = "pixcir,pixcir_ts", .data = &pixcir_ts_data },
+ { .compatible = "pixcir,pixcir_tangoc", .data = &pixcir_tangoc_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pixcir_of_match);
+#endif
+
static struct i2c_driver pixcir_i2c_ts_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "pixcir_ts",
.pm = &pixcir_dev_pm_ops,
+ .of_match_table = of_match_ptr(pixcir_of_match),
},
.probe = pixcir_i2c_ts_probe,
.remove = pixcir_i2c_ts_remove,
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 19cb247dbb86..5a69ded9b53c 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -264,7 +264,7 @@ static int s3c2410ts_probe(struct platform_device *pdev)
return -ENOENT;
}
- clk_enable(ts.clock);
+ clk_prepare_enable(ts.clock);
dev_dbg(dev, "got and enabled clocks\n");
ts.irq_tc = ret = platform_get_irq(pdev, 0);
@@ -369,7 +369,7 @@ static int s3c2410ts_remove(struct platform_device *pdev)
free_irq(ts.irq_tc, ts.input);
del_timer_sync(&touch_timer);
- clk_disable(ts.clock);
+ clk_disable_unprepare(ts.clock);
clk_put(ts.clock);
input_unregister_device(ts.input);
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index feea85b52fa8..8ba48f5eff7b 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -29,6 +29,8 @@
#include <linux/sysfs.h>
#include <linux/input/mt.h>
#include <linux/platform_data/zforce_ts.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -117,6 +119,8 @@ struct zforce_ts {
const struct zforce_ts_platdata *pdata;
char phys[32];
+ struct regulator *reg_vdd;
+
bool suspending;
bool suspended;
bool boot_complete;
@@ -690,6 +694,11 @@ static void zforce_reset(void *data)
struct zforce_ts *ts = data;
gpio_set_value(ts->pdata->gpio_rst, 0);
+
+ udelay(10);
+
+ if (!IS_ERR(ts->reg_vdd))
+ regulator_disable(ts->reg_vdd);
}
static struct zforce_ts_platdata *zforce_parse_dt(struct device *dev)
@@ -765,10 +774,32 @@ static int zforce_probe(struct i2c_client *client,
return ret;
}
+ ts->reg_vdd = devm_regulator_get_optional(&client->dev, "vdd");
+ if (IS_ERR(ts->reg_vdd)) {
+ ret = PTR_ERR(ts->reg_vdd);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ } else {
+ ret = regulator_enable(ts->reg_vdd);
+ if (ret)
+ return ret;
+
+ /*
+ * according to datasheet add 100us grace time after regular
+ * regulator enable delay.
+ */
+ udelay(100);
+ }
+
ret = devm_add_action(&client->dev, zforce_reset, ts);
if (ret) {
dev_err(&client->dev, "failed to register reset action, %d\n",
ret);
+
+ /* hereafter the regulator will be disabled by the action */
+ if (!IS_ERR(ts->reg_vdd))
+ regulator_disable(ts->reg_vdd);
+
return ret;
}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 4306885f48b1..60ab474bfff3 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -85,7 +85,7 @@ void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
*cnt = 0;
while (start < end) {
scope = start;
- if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI ||
+ if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
(*cnt)++;
@@ -381,7 +381,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
struct acpi_dmar_andd *andd = (void *)header;
/* Check for NUL termination within the designated length */
- if (strnlen(andd->object_name, header->length - 8) == header->length - 8) {
+ if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
"Your BIOS is broken; ANDD object name is not NUL-terminated\n"
"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
@@ -391,7 +391,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
return -EINVAL;
}
pr_info("ANDD device: %x name: %s\n", andd->device_number,
- andd->object_name);
+ andd->device_name);
return 0;
}
@@ -449,17 +449,17 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
- case ACPI_DMAR_TYPE_ATSR:
+ case ACPI_DMAR_TYPE_ROOT_ATS:
atsr = container_of(header, struct acpi_dmar_atsr, header);
pr_info("ATSR flags: %#x\n", atsr->flags);
break;
- case ACPI_DMAR_HARDWARE_AFFINITY:
+ case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
rhsa = container_of(header, struct acpi_dmar_rhsa, header);
pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
(unsigned long long)rhsa->base_address,
rhsa->proximity_domain);
break;
- case ACPI_DMAR_TYPE_ANDD:
+ case ACPI_DMAR_TYPE_NAMESPACE:
/* We don't print this here because we need to sanity-check
it first. So print it in dmar_parse_one_andd() instead. */
break;
@@ -540,15 +540,15 @@ parse_dmar_table(void)
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
ret = dmar_parse_one_rmrr(entry_header);
break;
- case ACPI_DMAR_TYPE_ATSR:
+ case ACPI_DMAR_TYPE_ROOT_ATS:
ret = dmar_parse_one_atsr(entry_header);
break;
- case ACPI_DMAR_HARDWARE_AFFINITY:
+ case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
#ifdef CONFIG_ACPI_NUMA
ret = dmar_parse_one_rhsa(entry_header);
#endif
break;
- case ACPI_DMAR_TYPE_ANDD:
+ case ACPI_DMAR_TYPE_NAMESPACE:
ret = dmar_parse_one_andd(entry_header);
break;
default:
@@ -632,7 +632,7 @@ static void __init dmar_acpi_insert_dev_scope(u8 device_number,
for (scope = (void *)(drhd + 1);
(unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
scope = ((void *)scope) + scope->length) {
- if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI)
+ if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
continue;
if (scope->enumeration_id != device_number)
continue;
@@ -667,21 +667,21 @@ static int __init dmar_acpi_dev_scope_init(void)
for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
andd = ((void *)andd) + andd->header.length) {
- if (andd->header.type == ACPI_DMAR_TYPE_ANDD) {
+ if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
acpi_handle h;
struct acpi_device *adev;
if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
- andd->object_name,
+ andd->device_name,
&h))) {
pr_err("Failed to find handle for ACPI object %s\n",
- andd->object_name);
+ andd->device_name);
continue;
}
acpi_bus_get_device(h, &adev);
if (!adev) {
pr_err("Failed to get device for ACPI object %s\n",
- andd->object_name);
+ andd->device_name);
continue;
}
dmar_acpi_insert_dev_scope(andd->device_number, adev);
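
The dmar.c hunks only track constant and field renames in the ACPI DMAR definitions; the parsing logic is unchanged. For reference, the mapping as it appears in these hunks:

/*
 * ACPI_DMAR_SCOPE_TYPE_ACPI        -> ACPI_DMAR_SCOPE_TYPE_NAMESPACE
 * ACPI_DMAR_TYPE_ATSR              -> ACPI_DMAR_TYPE_ROOT_ATS
 * ACPI_DMAR_HARDWARE_AFFINITY      -> ACPI_DMAR_TYPE_HARDWARE_AFFINITY
 * ACPI_DMAR_TYPE_ANDD              -> ACPI_DMAR_TYPE_NAMESPACE
 * struct acpi_dmar_andd.object_name -> .device_name
 */
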
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 792da5ea6d12..3ded3894623c 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -35,7 +35,8 @@
#include <linux/of_iommu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/tegra-ahb.h>
+
+#include <soc/tegra/ahb.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 4e230e7c76ee..b8632bf9a7f3 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -28,7 +28,6 @@ config ARM_VIC
config ARM_VIC_NR
int
default 4 if ARCH_S5PV210
- default 3 if ARCH_S5PC100
default 2
depends on ARM_VIC
help
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 9c1f883fc5a3..4b959e606fe8 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/arm/common/gic.c
- *
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 3ae2bb8d9cf2..ccf58548b161 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -14,6 +14,8 @@
#include <asm/exception.h>
#include <asm/mach/irq.h>
+#include "irqchip.h"
+
#define IRQ_STATUS 0x00
#define IRQ_RAW_STATUS 0x04
#define IRQ_ENABLE_SET 0x08
@@ -26,6 +28,8 @@
#define FIQ_ENABLE_SET 0x28
#define FIQ_ENABLE_CLEAR 0x2C
+#define PIC_ENABLES 0x20 /* set interrupt pass through bits */
+
/**
* struct fpga_irq_data - irq data container for the FPGA IRQ controller
* @base: memory offset in virtual memory
@@ -201,14 +205,26 @@ int __init fpga_irq_of_init(struct device_node *node,
/* Some chips are cascaded from a parent IRQ */
parent_irq = irq_of_parse_and_map(node, 0);
- if (!parent_irq)
+ if (!parent_irq) {
+ set_handle_irq(fpga_handle_irq);
parent_irq = -1;
+ }
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
writel(clear_mask, base + IRQ_ENABLE_CLEAR);
writel(clear_mask, base + FIQ_ENABLE_CLEAR);
+ /*
+ * On Versatile AB/PB, some secondary interrupts have a direct
+ * pass-thru to the primary controller for IRQs 20 and 22-31 which need
+ * to be enabled. See section 3.10 of the Versatile AB user guide.
+ */
+ if (of_device_is_compatible(node, "arm,versatile-sic"))
+ writel(0xffd00000, base + PIC_ENABLES);
+
return 0;
}
+IRQCHIP_DECLARE(arm_fpga, "arm,versatile-fpga-irq", fpga_irq_of_init);
+IRQCHIP_DECLARE(arm_fpga_sic, "arm,versatile-sic", fpga_irq_of_init);
#endif
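
The IRQCHIP_DECLARE() lines let the irqchip core invoke fpga_irq_of_init() for every matching device-tree node instead of relying on board code. A minimal sketch of the pattern for a hypothetical controller (the name and compatible string are illustrative, not from this patch):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "irqchip.h"

static int __init my_intc_of_init(struct device_node *node,
				  struct device_node *parent)
{
	void __iomem *base = of_iomap(node, 0);

	if (!base)
		return -ENOMEM;

	/* ... register an irq_domain and flow handlers here ... */

	return 0;
}
IRQCHIP_DECLARE(my_intc, "vendor,my-intc", my_intc_of_init);

The same init function can serve several compatibles, as done above for "arm,versatile-fpga-irq" and "arm,versatile-sic", with of_device_is_compatible() used to special-case the SIC pass-through enables.
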
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index a1b044e7eaad..8c96e2ddf43b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -32,14 +32,6 @@ config LEDS_88PM860X
This option enables support for on-chip LED drivers found on Marvell
Semiconductor 88PM8606 PMIC.
-config LEDS_ATMEL_PWM
- tristate "LED Support using Atmel PWM outputs"
- depends on LEDS_CLASS
- depends on ATMEL_PWM
- help
- This option enables support for LEDs driven using outputs
- of the dedicated PWM controller found on newer Atmel SOCs.
-
config LEDS_LM3530
tristate "LCD Backlight driver for LM3530"
depends on LEDS_CLASS
@@ -143,6 +135,13 @@ config LEDS_SUNFIRE
This option enables support for the Left, Middle, and Right
LEDs on the I/O and CPU boards of SunFire UltraSPARC servers.
+config LEDS_IPAQ_MICRO
+ tristate "LED Support for the Compaq iPAQ h3xxx"
+ depends on MFD_IPAQ_MICRO
+ help
+ Choose this option if you want to use the notification LED on
+ Compaq/HP iPAQ h3100 and h3600.
+
config LEDS_HP6XX
tristate "LED Support for the HP Jornada 6xx"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 79c5155199a7..d8cc5f2777de 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
# LED Platform Drivers
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
-obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
@@ -31,6 +30,7 @@ obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
+obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index f37d63cf726b..129729d35478 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -15,10 +15,10 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/timer.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/leds.h>
+#include <linux/workqueue.h>
#include "leds.h"
static struct class *leds_class;
@@ -97,9 +97,10 @@ static const struct attribute_group *led_groups[] = {
NULL,
};
-static void led_timer_function(unsigned long data)
+static void led_work_function(struct work_struct *ws)
{
- struct led_classdev *led_cdev = (void *)data;
+ struct led_classdev *led_cdev =
+ container_of(ws, struct led_classdev, blink_work.work);
unsigned long brightness;
unsigned long delay;
@@ -143,7 +144,8 @@ static void led_timer_function(unsigned long data)
}
}
- mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
+ queue_delayed_work(system_wq, &led_cdev->blink_work,
+ msecs_to_jiffies(delay));
}
static void set_brightness_delayed(struct work_struct *ws)
@@ -210,8 +212,9 @@ static const struct dev_pm_ops leds_class_dev_pm_ops = {
*/
int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
{
- led_cdev->dev = device_create(leds_class, parent, 0, led_cdev,
- "%s", led_cdev->name);
+ led_cdev->dev = device_create_with_groups(leds_class, parent, 0,
+ led_cdev, led_cdev->groups,
+ "%s", led_cdev->name);
if (IS_ERR(led_cdev->dev))
return PTR_ERR(led_cdev->dev);
@@ -230,9 +233,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed);
- init_timer(&led_cdev->blink_timer);
- led_cdev->blink_timer.function = led_timer_function;
- led_cdev->blink_timer.data = (unsigned long)led_cdev;
+ INIT_DELAYED_WORK(&led_cdev->blink_work, led_work_function);
#ifdef CONFIG_LEDS_TRIGGERS
led_trigger_set_default(led_cdev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 71b40d3bf776..4bb116867b88 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/rwsem.h>
#include <linux/leds.h>
+#include <linux/workqueue.h>
#include "leds.h"
DECLARE_RWSEM(leds_list_lock);
@@ -51,7 +52,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
return;
}
- mod_timer(&led_cdev->blink_timer, jiffies + 1);
+ queue_delayed_work(system_wq, &led_cdev->blink_work, 1);
}
@@ -75,7 +76,7 @@ void led_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- del_timer_sync(&led_cdev->blink_timer);
+ cancel_delayed_work_sync(&led_cdev->blink_work);
led_cdev->flags &= ~LED_BLINK_ONESHOT;
led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
@@ -90,7 +91,7 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev,
int invert)
{
if ((led_cdev->flags & LED_BLINK_ONESHOT) &&
- timer_pending(&led_cdev->blink_timer))
+ delayed_work_pending(&led_cdev->blink_work))
return;
led_cdev->flags |= LED_BLINK_ONESHOT;
@@ -107,7 +108,7 @@ EXPORT_SYMBOL(led_blink_set_oneshot);
void led_stop_software_blink(struct led_classdev *led_cdev)
{
- del_timer_sync(&led_cdev->blink_timer);
+ cancel_delayed_work_sync(&led_cdev->blink_work);
led_cdev->blink_delay_on = 0;
led_cdev->blink_delay_off = 0;
}
@@ -116,7 +117,7 @@ EXPORT_SYMBOL_GPL(led_stop_software_blink);
void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- /* delay brightness setting if need to stop soft-blink timer */
+ /* delay brightness setting if need to stop soft-blink work */
if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
led_cdev->delayed_set_value = brightness;
schedule_work(&led_cdev->set_brightness_work);
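
The led-class.c and led-core.c hunks replace the software-blink timer with a delayed work item: init_timer()/mod_timer()/del_timer_sync()/timer_pending() become INIT_DELAYED_WORK()/queue_delayed_work()/cancel_delayed_work_sync()/delayed_work_pending(), and the callback recovers its context with container_of() instead of casting the timer's data field. A minimal stand-alone sketch of that conversion, using a hypothetical structure:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_blinker {
	struct delayed_work blink_work;
	unsigned long period_ms;
};

static void my_blink_fn(struct work_struct *ws)
{
	struct my_blinker *b = container_of(ws, struct my_blinker,
					    blink_work.work);

	/* ... toggle the LED ... */

	/* re-arm, taking the role of the old mod_timer() call */
	queue_delayed_work(system_wq, &b->blink_work,
			   msecs_to_jiffies(b->period_ms));
}

static void my_blinker_start(struct my_blinker *b)
{
	INIT_DELAYED_WORK(&b->blink_work, my_blink_fn);
	queue_delayed_work(system_wq, &b->blink_work,
			   msecs_to_jiffies(b->period_ms));
}

static void my_blinker_stop(struct my_blinker *b)
{
	/* replaces del_timer_sync(); waits for a running callback */
	cancel_delayed_work_sync(&b->blink_work);
}

queue_delayed_work() takes its delay in jiffies, which is why led_set_software_blink() above passes 1 where mod_timer() previously used jiffies + 1.
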
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
deleted file mode 100644
index 56cec8d6a2ac..000000000000
--- a/drivers/leds/leds-atmel-pwm.c
+++ /dev/null
@@ -1,149 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/io.h>
-#include <linux/atmel_pwm.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-
-
-struct pwmled {
- struct led_classdev cdev;
- struct pwm_channel pwmc;
- struct gpio_led *desc;
- u32 mult;
- u8 active_low;
-};
-
-
-/*
- * For simplicity, we use "brightness" as if it were a linear function
- * of PWM duty cycle. However, a logarithmic function of duty cycle is
- * probably a better match for perceived brightness: two is half as bright
- * as four, four is half as bright as eight, etc
- */
-static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b)
-{
- struct pwmled *led;
-
- /* update the duty cycle for the *next* period */
- led = container_of(cdev, struct pwmled, cdev);
- pwm_channel_writel(&led->pwmc, PWM_CUPD, led->mult * (unsigned) b);
-}
-
-/*
- * NOTE: we reuse the platform_data structure of GPIO leds,
- * but repurpose its "gpio" number as a PWM channel number.
- */
-static int pwmled_probe(struct platform_device *pdev)
-{
- const struct gpio_led_platform_data *pdata;
- struct pwmled *leds;
- int i;
- int status;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata || pdata->num_leds < 1)
- return -ENODEV;
-
- leds = devm_kzalloc(&pdev->dev, pdata->num_leds * sizeof(*leds),
- GFP_KERNEL);
- if (!leds)
- return -ENOMEM;
-
- for (i = 0; i < pdata->num_leds; i++) {
- struct pwmled *led = leds + i;
- const struct gpio_led *dat = pdata->leds + i;
- u32 tmp;
-
- led->cdev.name = dat->name;
- led->cdev.brightness = LED_OFF;
- led->cdev.brightness_set = pwmled_brightness;
- led->cdev.default_trigger = dat->default_trigger;
-
- led->active_low = dat->active_low;
-
- status = pwm_channel_alloc(dat->gpio, &led->pwmc);
- if (status < 0)
- goto err;
-
- /*
- * Prescale clock by 2^x, so PWM counts in low MHz.
- * Start each cycle with the LED active, so increasing
- * the duty cycle gives us more time on (== brighter).
- */
- tmp = 5;
- if (!led->active_low)
- tmp |= PWM_CPR_CPOL;
- pwm_channel_writel(&led->pwmc, PWM_CMR, tmp);
-
- /*
- * Pick a period so PWM cycles at 100+ Hz; and a multiplier
- * for scaling duty cycle: brightness * mult.
- */
- tmp = (led->pwmc.mck / (1 << 5)) / 100;
- tmp /= 255;
- led->mult = tmp;
- pwm_channel_writel(&led->pwmc, PWM_CDTY,
- led->cdev.brightness * 255);
- pwm_channel_writel(&led->pwmc, PWM_CPRD,
- LED_FULL * tmp);
-
- pwm_channel_enable(&led->pwmc);
-
- /* Hand it over to the LED framework */
- status = led_classdev_register(&pdev->dev, &led->cdev);
- if (status < 0) {
- pwm_channel_free(&led->pwmc);
- goto err;
- }
- }
-
- platform_set_drvdata(pdev, leds);
- return 0;
-
-err:
- if (i > 0) {
- for (i = i - 1; i >= 0; i--) {
- led_classdev_unregister(&leds[i].cdev);
- pwm_channel_free(&leds[i].pwmc);
- }
- }
-
- return status;
-}
-
-static int pwmled_remove(struct platform_device *pdev)
-{
- const struct gpio_led_platform_data *pdata;
- struct pwmled *leds;
- unsigned i;
-
- pdata = dev_get_platdata(&pdev->dev);
- leds = platform_get_drvdata(pdev);
-
- for (i = 0; i < pdata->num_leds; i++) {
- struct pwmled *led = leds + i;
-
- led_classdev_unregister(&led->cdev);
- pwm_channel_free(&led->pwmc);
- }
-
- return 0;
-}
-
-static struct platform_driver pwmled_driver = {
- .driver = {
- .name = "leds-atmel-pwm",
- .owner = THIS_MODULE,
- },
- /* REVISIT add suspend() and resume() methods */
- .probe = pwmled_probe,
- .remove = pwmled_remove,
-};
-
-module_platform_driver(pwmled_driver);
-
-MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:leds-atmel-pwm");
diff --git a/drivers/leds/leds-ipaq-micro.c b/drivers/leds/leds-ipaq-micro.c
new file mode 100644
index 000000000000..3776f516cd88
--- /dev/null
+++ b/drivers/leds/leds-ipaq-micro.c
@@ -0,0 +1,141 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * h3xxx atmel micro companion support, notification LED subdevice
+ *
+ * Author : Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ipaq-micro.h>
+#include <linux/leds.h>
+
+#define LED_YELLOW 0x00
+#define LED_GREEN 0x01
+
+#define LED_EN (1 << 4) /* LED ON/OFF 0:off, 1:on */
+#define LED_AUTOSTOP (1 << 5) /* LED ON/OFF auto stop set 0:disable, 1:enable */
+#define LED_ALWAYS (1 << 6) /* LED Interrupt Mask 0:No mask, 1:mask */
+
+static void micro_leds_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct ipaq_micro *micro = dev_get_drvdata(led_cdev->dev->parent->parent);
+ /*
+ * In this message:
+ * Byte 0 = LED color: 0 = yellow, 1 = green
+ * yellow LED is always ~30 blinks per minute
+ * Byte 1 = duration (flags?) appears to be ignored
+ * Byte 2 = green ontime in 1/10 sec (deciseconds)
+ * 1 = 1/10 second
+ * 0 = 256/10 second
+ * Byte 3 = green offtime in 1/10 sec (deciseconds)
+ * 1 = 1/10 second
+ * 0 = 256/10 seconds
+ */
+ struct ipaq_micro_msg msg = {
+ .id = MSG_NOTIFY_LED,
+ .tx_len = 4,
+ };
+
+ msg.tx_data[0] = LED_GREEN;
+ msg.tx_data[1] = 0;
+ if (value) {
+ msg.tx_data[2] = 0; /* Duty cycle 256 */
+ msg.tx_data[3] = 1;
+ } else {
+ msg.tx_data[2] = 1;
+ msg.tx_data[3] = 0; /* Duty cycle 256 */
+ }
+ ipaq_micro_tx_msg_sync(micro, &msg);
+}
+
+/* Maximum duty cycle in ms 256/10 sec = 25600 ms */
+#define IPAQ_LED_MAX_DUTY 25600
+
+static int micro_leds_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct ipaq_micro *micro = dev_get_drvdata(led_cdev->dev->parent->parent);
+ /*
+ * In this message:
+ * Byte 0 = LED color: 0 = yellow, 1 = green
+ * yellow LED is always ~30 blinks per minute
+ * Byte 1 = duration (flags?) appears to be ignored
+ * Byte 2 = green ontime in 1/10 sec (deciseconds)
+ * 1 = 1/10 second
+ * 0 = 256/10 second
+ * Byte 3 = green offtime in 1/10 sec (deciseconds)
+ * 1 = 1/10 second
+ * 0 = 256/10 seconds
+ */
+ struct ipaq_micro_msg msg = {
+ .id = MSG_NOTIFY_LED,
+ .tx_len = 4,
+ };
+
+ msg.tx_data[0] = LED_GREEN;
+ if (*delay_on > IPAQ_LED_MAX_DUTY ||
+ *delay_off > IPAQ_LED_MAX_DUTY)
+ return -EINVAL;
+
+ if (*delay_on == 0 && *delay_off == 0) {
+ *delay_on = 100;
+ *delay_off = 100;
+ }
+
+ msg.tx_data[1] = 0;
+ if (*delay_on >= IPAQ_LED_MAX_DUTY)
+ msg.tx_data[2] = 0;
+ else
+ msg.tx_data[2] = (u8) DIV_ROUND_CLOSEST(*delay_on, 100);
+ if (*delay_off >= IPAQ_LED_MAX_DUTY)
+ msg.tx_data[3] = 0;
+ else
+ msg.tx_data[3] = (u8) DIV_ROUND_CLOSEST(*delay_off, 100);
+ return ipaq_micro_tx_msg_sync(micro, &msg);
+}
+
+static struct led_classdev micro_led = {
+ .name = "led-ipaq-micro",
+ .brightness_set = micro_leds_brightness_set,
+ .blink_set = micro_leds_blink_set,
+ .flags = LED_CORE_SUSPENDRESUME,
+};
+
+static int micro_leds_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = led_classdev_register(&pdev->dev, &micro_led);
+ if (ret) {
+ dev_err(&pdev->dev, "registering led failed: %d\n", ret);
+ return ret;
+ }
+ dev_info(&pdev->dev, "iPAQ micro notification LED driver\n");
+
+ return 0;
+}
+
+static int micro_leds_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&micro_led);
+ return 0;
+}
+
+static struct platform_driver micro_leds_device_driver = {
+ .driver = {
+ .name = "ipaq-micro-leds",
+ },
+ .probe = micro_leds_probe,
+ .remove = micro_leds_remove,
+};
+module_platform_driver(micro_leds_device_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("driver for iPAQ Atmel micro leds");
+MODULE_ALIAS("platform:ipaq-micro-leds");
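
The blink_set handler above encodes the on/off times in deciseconds, with 0 standing for 256/10 s, which is why IPAQ_LED_MAX_DUTY is 25600 ms. A short worked example of that encoding, following the same DIV_ROUND_CLOSEST() rounding:

/*
 *  delay_on  =   500 ms -> DIV_ROUND_CLOSEST(500, 100)  = 5   (0.5 s on)
 *  delay_off =  1000 ms -> DIV_ROUND_CLOSEST(1000, 100) = 10  (1.0 s off)
 *  delay_on  = 25600 ms -> tx_data[2] = 0, i.e. 256/10 s = 25.6 s
 *  delay_on  = delay_off = 0 -> defaulted to 100 ms / 100 ms before encoding
 */
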
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 652368c2ea9a..91325de3cd33 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -400,6 +400,12 @@ static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
}
static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set);
+static struct attribute *lm3530_attrs[] = {
+ &dev_attr_mode.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(lm3530);
+
static int lm3530_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -436,6 +442,7 @@ static int lm3530_probe(struct i2c_client *client,
drvdata->led_dev.name = LM3530_LED_DEV;
drvdata->led_dev.brightness_set = lm3530_brightness_set;
drvdata->led_dev.max_brightness = MAX_BRIGHTNESS;
+ drvdata->led_dev.groups = lm3530_groups;
i2c_set_clientdata(client, drvdata);
@@ -461,26 +468,13 @@ static int lm3530_probe(struct i2c_client *client,
return err;
}
- err = device_create_file(drvdata->led_dev.dev, &dev_attr_mode);
- if (err < 0) {
- dev_err(&client->dev, "File device creation failed: %d\n", err);
- err = -ENODEV;
- goto err_create_file;
- }
-
return 0;
-
-err_create_file:
- led_classdev_unregister(&drvdata->led_dev);
- return err;
}
static int lm3530_remove(struct i2c_client *client)
{
struct lm3530_data *drvdata = i2c_get_clientdata(client);
- device_remove_file(drvdata->led_dev.dev, &dev_attr_mode);
-
lm3530_led_disable(drvdata);
led_classdev_unregister(&drvdata->led_dev);
return 0;
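
This lm3530 hunk is the template for the remaining LED driver conversions in this series: the sysfs attributes are gathered with ATTRIBUTE_GROUPS() and handed to the LED core through the class device's groups pointer, so device_create_file()/device_remove_file() and their error unwinding disappear, and the attributes are created and torn down together with the device by device_create_with_groups() in led_classdev_register() above. A minimal sketch with a hypothetical "mode" attribute:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/sysfs.h>

/* hypothetical "mode" attribute, for illustration only */
static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "manual\n");
}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	return len;
}
static DEVICE_ATTR(mode, 0644, mode_show, mode_store);

static struct attribute *my_led_attrs[] = {
	&dev_attr_mode.attr,
	NULL
};
ATTRIBUTE_GROUPS(my_led);		/* emits my_led_groups[] */

static void my_led_init(struct led_classdev *cdev)
{
	/* attributes now come and go with the class device itself */
	cdev->groups = my_led_groups;
}

The same shape repeats below for lm3533, lm355x, lm3642, lp55xx, max8997, netxbig, ns2, ss4200 and wm831x-status.
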
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index e2c642c1169b..cbf61a40137d 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -645,6 +645,11 @@ static struct attribute_group lm3533_led_attribute_group = {
.attrs = lm3533_led_attributes
};
+static const struct attribute_group *lm3533_led_attribute_groups[] = {
+ &lm3533_led_attribute_group,
+ NULL
+};
+
static int lm3533_led_setup(struct lm3533_led *led,
struct lm3533_led_platform_data *pdata)
{
@@ -692,6 +697,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
led->cdev.brightness_get = lm3533_led_get;
led->cdev.blink_set = lm3533_led_blink_set;
led->cdev.brightness = LED_OFF;
+ led->cdev.groups = lm3533_led_attribute_groups,
led->id = pdev->id;
mutex_init(&led->mutex);
@@ -715,25 +721,16 @@ static int lm3533_led_probe(struct platform_device *pdev)
led->cb.dev = led->cdev.dev;
- ret = sysfs_create_group(&led->cdev.dev->kobj,
- &lm3533_led_attribute_group);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to create sysfs attributes\n");
- goto err_unregister;
- }
-
ret = lm3533_led_setup(led, pdata);
if (ret)
- goto err_sysfs_remove;
+ goto err_unregister;
ret = lm3533_ctrlbank_enable(&led->cb);
if (ret)
- goto err_sysfs_remove;
+ goto err_unregister;
return 0;
-err_sysfs_remove:
- sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
err_unregister:
led_classdev_unregister(&led->cdev);
flush_work(&led->work);
@@ -748,7 +745,6 @@ static int lm3533_led_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s\n", __func__);
lm3533_ctrlbank_disable(&led->cb);
- sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
led_classdev_unregister(&led->cdev);
flush_work(&led->work);
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index 591eb5e58ae3..f5112cb2d991 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -413,6 +413,12 @@ out:
static DEVICE_ATTR(pattern, S_IWUSR, NULL, lm3556_indicator_pattern_store);
+static struct attribute *lm355x_indicator_attrs[] = {
+ &dev_attr_pattern.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(lm355x_indicator);
+
static const struct regmap_config lm355x_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -501,25 +507,18 @@ static int lm355x_probe(struct i2c_client *client,
else
chip->cdev_indicator.max_brightness = 8;
chip->cdev_indicator.brightness_set = lm355x_indicator_brightness_set;
+ /* indicator pattern control only for LM3556 */
+ if (id->driver_data == CHIP_LM3556)
+ chip->cdev_indicator.groups = lm355x_indicator_groups;
err = led_classdev_register((struct device *)
&client->dev, &chip->cdev_indicator);
if (err < 0)
goto err_create_indicator_file;
- /* indicator pattern control only for LM3554 */
- if (id->driver_data == CHIP_LM3556) {
- err =
- device_create_file(chip->cdev_indicator.dev,
- &dev_attr_pattern);
- if (err < 0)
- goto err_create_pattern_file;
- }
dev_info(&client->dev, "%s is initialized\n",
lm355x_name[id->driver_data]);
return 0;
-err_create_pattern_file:
- led_classdev_unregister(&chip->cdev_indicator);
err_create_indicator_file:
led_classdev_unregister(&chip->cdev_torch);
err_create_torch_file:
@@ -534,8 +533,6 @@ static int lm355x_remove(struct i2c_client *client)
struct lm355x_reg_data *preg = chip->regs;
regmap_write(chip->regmap, preg[REG_OPMODE].regno, 0);
- if (chip->type == CHIP_LM3556)
- device_remove_file(chip->cdev_indicator.dev, &dev_attr_pattern);
led_classdev_unregister(&chip->cdev_indicator);
flush_work(&chip->work_indicator);
led_classdev_unregister(&chip->cdev_torch);
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index ceb6b3cde6fe..d3dec0132769 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -313,6 +313,18 @@ static const struct regmap_config lm3642_regmap = {
.max_register = REG_MAX,
};
+static struct attribute *lm3642_flash_attrs[] = {
+ &dev_attr_strobe_pin.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(lm3642_flash);
+
+static struct attribute *lm3642_torch_attrs[] = {
+ &dev_attr_torch_pin.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(lm3642_torch);
+
static int lm3642_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -364,17 +376,13 @@ static int lm3642_probe(struct i2c_client *client,
chip->cdev_flash.max_brightness = 16;
chip->cdev_flash.brightness_set = lm3642_strobe_brightness_set;
chip->cdev_flash.default_trigger = "flash";
+ chip->cdev_flash.groups = lm3642_flash_groups,
err = led_classdev_register((struct device *)
&client->dev, &chip->cdev_flash);
if (err < 0) {
dev_err(chip->dev, "failed to register flash\n");
goto err_out;
}
- err = device_create_file(chip->cdev_flash.dev, &dev_attr_strobe_pin);
- if (err < 0) {
- dev_err(chip->dev, "failed to create strobe-pin file\n");
- goto err_create_flash_pin_file;
- }
/* torch */
INIT_WORK(&chip->work_torch, lm3642_deferred_torch_brightness_set);
@@ -382,17 +390,13 @@ static int lm3642_probe(struct i2c_client *client,
chip->cdev_torch.max_brightness = 8;
chip->cdev_torch.brightness_set = lm3642_torch_brightness_set;
chip->cdev_torch.default_trigger = "torch";
+ chip->cdev_torch.groups = lm3642_torch_groups,
err = led_classdev_register((struct device *)
&client->dev, &chip->cdev_torch);
if (err < 0) {
dev_err(chip->dev, "failed to register torch\n");
goto err_create_torch_file;
}
- err = device_create_file(chip->cdev_torch.dev, &dev_attr_torch_pin);
- if (err < 0) {
- dev_err(chip->dev, "failed to create torch-pin file\n");
- goto err_create_torch_pin_file;
- }
/* indicator */
INIT_WORK(&chip->work_indicator,
@@ -411,12 +415,8 @@ static int lm3642_probe(struct i2c_client *client,
return 0;
err_create_indicator_file:
- device_remove_file(chip->cdev_torch.dev, &dev_attr_torch_pin);
-err_create_torch_pin_file:
led_classdev_unregister(&chip->cdev_torch);
err_create_torch_file:
- device_remove_file(chip->cdev_flash.dev, &dev_attr_strobe_pin);
-err_create_flash_pin_file:
led_classdev_unregister(&chip->cdev_flash);
err_out:
return err;
@@ -428,10 +428,8 @@ static int lm3642_remove(struct i2c_client *client)
led_classdev_unregister(&chip->cdev_indicator);
flush_work(&chip->work_indicator);
- device_remove_file(chip->cdev_torch.dev, &dev_attr_torch_pin);
led_classdev_unregister(&chip->cdev_torch);
flush_work(&chip->work_torch);
- device_remove_file(chip->cdev_flash.dev, &dev_attr_strobe_pin);
led_classdev_unregister(&chip->cdev_flash);
flush_work(&chip->work_flash);
regmap_write(chip->regmap, REG_ENABLE, 0);
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index 88317b4f7bf3..77c26bc32eed 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -127,15 +127,12 @@ static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, lp55xx_show_current,
lp55xx_store_current);
static DEVICE_ATTR(max_current, S_IRUGO , lp55xx_show_max_current, NULL);
-static struct attribute *lp55xx_led_attributes[] = {
+static struct attribute *lp55xx_led_attrs[] = {
&dev_attr_led_current.attr,
&dev_attr_max_current.attr,
NULL,
};
-
-static struct attribute_group lp55xx_led_attr_group = {
- .attrs = lp55xx_led_attributes
-};
+ATTRIBUTE_GROUPS(lp55xx_led);
static void lp55xx_set_brightness(struct led_classdev *cdev,
enum led_brightness brightness)
@@ -176,6 +173,7 @@ static int lp55xx_init_led(struct lp55xx_led *led,
}
led->cdev.brightness_set = lp55xx_set_brightness;
+ led->cdev.groups = lp55xx_led_groups;
if (pdata->led_config[chan].name) {
led->cdev.name = pdata->led_config[chan].name;
@@ -185,24 +183,12 @@ static int lp55xx_init_led(struct lp55xx_led *led,
led->cdev.name = name;
}
- /*
- * register led class device for each channel and
- * add device attributes
- */
-
ret = led_classdev_register(dev, &led->cdev);
if (ret) {
dev_err(dev, "led register err: %d\n", ret);
return ret;
}
- ret = sysfs_create_group(&led->cdev.dev->kobj, &lp55xx_led_attr_group);
- if (ret) {
- dev_err(dev, "led sysfs err: %d\n", ret);
- led_classdev_unregister(&led->cdev);
- return ret;
- }
-
return 0;
}
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
index f449a8bdddc7..607bc2755aba 100644
--- a/drivers/leds/leds-max8997.c
+++ b/drivers/leds/leds-max8997.c
@@ -229,6 +229,12 @@ static ssize_t max8997_led_store_mode(struct device *dev,
static DEVICE_ATTR(mode, 0644, max8997_led_show_mode, max8997_led_store_mode);
+static struct attribute *max8997_attrs[] = {
+ &dev_attr_mode.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(max8997);
+
static int max8997_led_probe(struct platform_device *pdev)
{
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
@@ -253,6 +259,7 @@ static int max8997_led_probe(struct platform_device *pdev)
led->cdev.brightness_set = max8997_led_brightness_set;
led->cdev.flags |= LED_CORE_SUSPENDRESUME;
led->cdev.brightness = 0;
+ led->cdev.groups = max8997_groups;
led->iodev = iodev;
/* initialize mode and brightness according to platform_data */
@@ -281,14 +288,6 @@ static int max8997_led_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- ret = device_create_file(led->cdev.dev, &dev_attr_mode);
- if (ret != 0) {
- dev_err(&pdev->dev,
- "failed to create file: %d\n", ret);
- led_classdev_unregister(&led->cdev);
- return ret;
- }
-
return 0;
}
@@ -296,7 +295,6 @@ static int max8997_led_remove(struct platform_device *pdev)
{
struct max8997_led *led = platform_get_drvdata(pdev);
- device_remove_file(led->cdev.dev, &dev_attr_mode);
led_classdev_unregister(&led->cdev);
return 0;
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index e97f443a6e07..64fde485dcaa 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -293,10 +293,14 @@ static ssize_t netxbig_led_sata_show(struct device *dev,
static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);
+static struct attribute *netxbig_led_attrs[] = {
+ &dev_attr_sata.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(netxbig_led);
+
static void delete_netxbig_led(struct netxbig_led_data *led_dat)
{
- if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
- device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
led_classdev_unregister(&led_dat->cdev);
}
@@ -306,7 +310,6 @@ create_netxbig_led(struct platform_device *pdev,
const struct netxbig_led *template)
{
struct netxbig_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret;
spin_lock_init(&led_dat->lock);
led_dat->gpio_ext = pdata->gpio_ext;
@@ -327,6 +330,12 @@ create_netxbig_led(struct platform_device *pdev,
led_dat->sata = 0;
led_dat->cdev.brightness = LED_OFF;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ /*
+ * If available, expose the SATA activity blink capability through
+ * a "sata" sysfs attribute.
+ */
+ if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
+ led_dat->cdev.groups = netxbig_led_groups;
led_dat->mode_addr = template->mode_addr;
led_dat->mode_val = template->mode_val;
led_dat->bright_addr = template->bright_addr;
@@ -334,21 +343,7 @@ create_netxbig_led(struct platform_device *pdev,
led_dat->timer = pdata->timer;
led_dat->num_timer = pdata->num_timer;
- ret = led_classdev_register(&pdev->dev, &led_dat->cdev);
- if (ret < 0)
- return ret;
-
- /*
- * If available, expose the SATA activity blink capability through
- * a "sata" sysfs attribute.
- */
- if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) {
- ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata);
- if (ret)
- led_classdev_unregister(&led_dat->cdev);
- }
-
- return ret;
+ return led_classdev_register(&pdev->dev, &led_dat->cdev);
}
static int netxbig_led_probe(struct platform_device *pdev)
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index efa625883c83..231993d1fe21 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -185,6 +185,12 @@ static ssize_t ns2_led_sata_show(struct device *dev,
static DEVICE_ATTR(sata, 0644, ns2_led_sata_show, ns2_led_sata_store);
+static struct attribute *ns2_led_attrs[] = {
+ &dev_attr_sata.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(ns2_led);
+
static int
create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
const struct ns2_led *template)
@@ -219,6 +225,7 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
led_dat->cdev.blink_set = NULL;
led_dat->cdev.brightness_set = ns2_led_set;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led_dat->cdev.groups = ns2_led_groups;
led_dat->cmd = template->cmd;
led_dat->slow = template->slow;
@@ -235,20 +242,11 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
if (ret < 0)
return ret;
- ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata);
- if (ret < 0)
- goto err_free_cdev;
-
return 0;
-
-err_free_cdev:
- led_classdev_unregister(&led_dat->cdev);
- return ret;
}
static void delete_ns2_led(struct ns2_led_data *led_dat)
{
- device_remove_file(led_dat->cdev.dev, &dev_attr_sata);
led_classdev_unregister(&led_dat->cdev);
}
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index 82589c0a5689..f110b4c456ba 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -12,7 +12,7 @@
* directory of this archive for more details.
*
* LED driver for the PCA9633 I2C LED driver (7-bit slave address 0x62)
- * LED driver for the PCA9634 I2C LED driver (7-bit slave address set by hw.)
+ * LED driver for the PCA9634/5 I2C LED driver (7-bit slave address set by hw.)
*
* Note that hardware blinking violates the leds infrastructure driver
* interface since the hardware only supports blinking all LEDs with the
@@ -52,6 +52,7 @@
enum pca963x_type {
pca9633,
pca9634,
+ pca9635,
};
struct pca963x_chipdef {
@@ -74,6 +75,12 @@ static struct pca963x_chipdef pca963x_chipdefs[] = {
.ledout_base = 0xc,
.n_leds = 8,
},
+ [pca9635] = {
+ .grppwm = 0x12,
+ .grpfreq = 0x13,
+ .ledout_base = 0x14,
+ .n_leds = 16,
+ },
};
/* Total blink period in milliseconds */
@@ -84,6 +91,7 @@ static const struct i2c_device_id pca963x_id[] = {
{ "pca9632", pca9633 },
{ "pca9633", pca9633 },
{ "pca9634", pca9634 },
+ { "pca9635", pca9635 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pca963x_id);
@@ -107,7 +115,7 @@ struct pca963x_led {
struct work_struct work;
enum led_brightness brightness;
struct led_classdev led_cdev;
- int led_num; /* 0 .. 7 potentially */
+ int led_num; /* 0 .. 15 potentially */
enum pca963x_cmd cmd;
char name[32];
u8 gdc;
@@ -321,6 +329,7 @@ static const struct of_device_id of_pca963x_match[] = {
{ .compatible = "nxp,pca9632", },
{ .compatible = "nxp,pca9633", },
{ .compatible = "nxp,pca9634", },
+ { .compatible = "nxp,pca9635", },
{},
};
#else
@@ -375,9 +384,8 @@ static int pca963x_probe(struct i2c_client *client,
pca963x_chip->leds = pca963x;
/* Turn off LEDs by default*/
- i2c_smbus_write_byte_data(client, chip->ledout_base, 0x00);
- if (chip->n_leds > 4)
- i2c_smbus_write_byte_data(client, chip->ledout_base + 1, 0x00);
+ for (i = 0; i < chip->n_leds / 4; i++)
+ i2c_smbus_write_byte_data(client, chip->ledout_base + i, 0x00);
for (i = 0; i < chip->n_leds; i++) {
pca963x[i].led_num = i;
@@ -415,9 +423,13 @@ static int pca963x_probe(struct i2c_client *client,
/* Disable LED all-call address and set normal mode */
i2c_smbus_write_byte_data(client, PCA963X_MODE1, 0x00);
- /* Configure output: open-drain or totem pole (push-pull) */
- if (pdata && pdata->outdrv == PCA963X_OPEN_DRAIN)
- i2c_smbus_write_byte_data(client, PCA963X_MODE2, 0x01);
+ if (pdata) {
+ /* Configure output: open-drain or totem pole (push-pull) */
+ if (pdata->outdrv == PCA963X_OPEN_DRAIN)
+ i2c_smbus_write_byte_data(client, PCA963X_MODE2, 0x01);
+ else
+ i2c_smbus_write_byte_data(client, PCA963X_MODE2, 0x05);
+ }
return 0;
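
The pca963x change above generalizes the LEDOUT initialization: each LEDOUT register carries the 2-bit driver state for four outputs, so clearing n_leds / 4 consecutive registers covers every supported chip instead of hard-coding one or two writes. Worked out for the chips defined in the hunks above:

/*
 *  pca9633:  4 LEDs -> 1 LEDOUT register
 *  pca9634:  8 LEDs -> 2 registers starting at 0x0c (0x0c, 0x0d)
 *  pca9635: 16 LEDs -> 4 registers starting at 0x14 (0x14 .. 0x17)
 */
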
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 2eb3ef62962b..046cb7008745 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -469,6 +469,12 @@ static ssize_t nas_led_blink_store(struct device *dev,
static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store);
+static struct attribute *nasgpio_led_attrs[] = {
+ &dev_attr_blink.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(nasgpio_led);
+
static int register_nasgpio_led(int led_nr)
{
int ret;
@@ -481,20 +487,18 @@ static int register_nasgpio_led(int led_nr)
led->brightness = LED_FULL;
led->brightness_set = nasgpio_led_set_brightness;
led->blink_set = nasgpio_led_set_blink;
+ led->groups = nasgpio_led_groups;
ret = led_classdev_register(&nas_gpio_pci_dev->dev, led);
if (ret)
return ret;
- ret = device_create_file(led->dev, &dev_attr_blink);
- if (ret)
- led_classdev_unregister(led);
- return ret;
+
+ return 0;
}
static void unregister_nasgpio_led(int led_nr)
{
struct led_classdev *led = get_classdev_for_led_nr(led_nr);
led_classdev_unregister(led);
- device_remove_file(led->dev, &dev_attr_blink);
}
/*
* module load/initialization
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index e72c974142d0..1b71e0701002 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -219,6 +219,12 @@ static ssize_t wm831x_status_src_store(struct device *dev,
static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store);
+static struct attribute *wm831x_status_attrs[] = {
+ &dev_attr_src.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(wm831x_status);
+
static int wm831x_status_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
@@ -232,8 +238,7 @@ static int wm831x_status_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_REG, 0);
if (res == NULL) {
dev_err(&pdev->dev, "No register resource\n");
- ret = -EINVAL;
- goto err;
+ return -EINVAL;
}
drvdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_status),
@@ -284,31 +289,21 @@ static int wm831x_status_probe(struct platform_device *pdev)
drvdata->cdev.default_trigger = pdata.default_trigger;
drvdata->cdev.brightness_set = wm831x_status_set;
drvdata->cdev.blink_set = wm831x_status_blink_set;
+ drvdata->cdev.groups = wm831x_status_groups;
ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
- goto err_led;
+ return ret;
}
- ret = device_create_file(drvdata->cdev.dev, &dev_attr_src);
- if (ret != 0)
- dev_err(&pdev->dev,
- "No source control for LED: %d\n", ret);
-
return 0;
-
-err_led:
- led_classdev_unregister(&drvdata->cdev);
-err:
- return ret;
}
static int wm831x_status_remove(struct platform_device *pdev)
{
struct wm831x_status *drvdata = platform_get_drvdata(pdev);
- device_remove_file(drvdata->cdev.dev, &dev_attr_src);
led_classdev_unregister(&drvdata->cdev);
return 0;
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index 0bf1e4edf04d..6590558d1d31 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -42,7 +42,6 @@ DEFINE_MUTEX(lguest_lock);
static __init int map_switcher(void)
{
int i, err;
- struct page **pagep;
/*
* Map the Switcher in to high memory.
@@ -110,11 +109,9 @@ static __init int map_switcher(void)
* This code actually sets up the pages we've allocated to appear at
* switcher_addr. map_vm_area() takes the vma we allocated above, the
* kind of pages we're mapping (kernel pages), and a pointer to our
- * array of struct pages. It increments that pointer, but we don't
- * care.
+ * array of struct pages.
*/
- pagep = lg_switcher_pages;
- err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep);
+ err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, lg_switcher_pages);
if (err) {
printk("lguest: map_vm_area failed: %i\n", err);
goto free_vma;
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index b1d91170ded0..6f68537c93ce 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -110,13 +110,7 @@ static int pmu_backlight_update_status(struct backlight_device *bd)
}
-static int pmu_backlight_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static const struct backlight_ops pmu_backlight_data = {
- .get_brightness = pmu_backlight_get_brightness,
.update_status = pmu_backlight_update_status,
};
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c8b5c13bcd05..9fd9c6717e0c 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -16,26 +16,9 @@ config PL320_MBOX
Management Engine, primarily for cpufreq. Say Y here if you want
to use the PL320 IPCM support.
-config OMAP_MBOX
- tristate
- help
- This option is selected by any OMAP architecture specific mailbox
- driver such as CONFIG_OMAP1_MBOX or CONFIG_OMAP2PLUS_MBOX. This
- enables the common OMAP mailbox framework code.
-
-config OMAP1_MBOX
- tristate "OMAP1 Mailbox framework support"
- depends on ARCH_OMAP1
- select OMAP_MBOX
- help
- Mailbox implementation for OMAP chips with hardware for
- interprocessor communication involving DSP in OMAP1. Say Y here
- if you want to use OMAP1 Mailbox framework support.
-
config OMAP2PLUS_MBOX
tristate "OMAP2+ Mailbox framework support"
depends on ARCH_OMAP2PLUS
- select OMAP_MBOX
help
Mailbox implementation for OMAP family chips with hardware for
interprocessor communication involving DSP, IVA1.0 and IVA2 in
@@ -44,7 +27,7 @@ config OMAP2PLUS_MBOX
config OMAP_MBOX_KFIFO_SIZE
int "Mailbox kfifo default buffer size (bytes)"
- depends on OMAP2PLUS_MBOX || OMAP1_MBOX
+ depends on OMAP2PLUS_MBOX
default 256
help
Specify the default size of mailbox's kfifo buffers (bytes).
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index e0facb34084a..6d184dbcaca8 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -1,7 +1,3 @@
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
-obj-$(CONFIG_OMAP_MBOX) += omap-mailbox.o
-obj-$(CONFIG_OMAP1_MBOX) += mailbox_omap1.o
-mailbox_omap1-objs := mailbox-omap1.o
-obj-$(CONFIG_OMAP2PLUS_MBOX) += mailbox_omap2.o
-mailbox_omap2-objs := mailbox-omap2.o
+obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
diff --git a/drivers/mailbox/mailbox-omap1.c b/drivers/mailbox/mailbox-omap1.c
deleted file mode 100644
index 9001b7633f10..000000000000
--- a/drivers/mailbox/mailbox-omap1.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Mailbox reservation modules for OMAP1
- *
- * Copyright (C) 2006-2009 Nokia Corporation
- * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "omap-mbox.h"
-
-#define MAILBOX_ARM2DSP1 0x00
-#define MAILBOX_ARM2DSP1b 0x04
-#define MAILBOX_DSP2ARM1 0x08
-#define MAILBOX_DSP2ARM1b 0x0c
-#define MAILBOX_DSP2ARM2 0x10
-#define MAILBOX_DSP2ARM2b 0x14
-#define MAILBOX_ARM2DSP1_Flag 0x18
-#define MAILBOX_DSP2ARM1_Flag 0x1c
-#define MAILBOX_DSP2ARM2_Flag 0x20
-
-static void __iomem *mbox_base;
-
-struct omap_mbox1_fifo {
- unsigned long cmd;
- unsigned long data;
- unsigned long flag;
-};
-
-struct omap_mbox1_priv {
- struct omap_mbox1_fifo tx_fifo;
- struct omap_mbox1_fifo rx_fifo;
-};
-
-static inline int mbox_read_reg(size_t ofs)
-{
- return __raw_readw(mbox_base + ofs);
-}
-
-static inline void mbox_write_reg(u32 val, size_t ofs)
-{
- __raw_writew(val, mbox_base + ofs);
-}
-
-/* msg */
-static mbox_msg_t omap1_mbox_fifo_read(struct omap_mbox *mbox)
-{
- struct omap_mbox1_fifo *fifo =
- &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;
- mbox_msg_t msg;
-
- msg = mbox_read_reg(fifo->data);
- msg |= ((mbox_msg_t) mbox_read_reg(fifo->cmd)) << 16;
-
- return msg;
-}
-
-static void
-omap1_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
-{
- struct omap_mbox1_fifo *fifo =
- &((struct omap_mbox1_priv *)mbox->priv)->tx_fifo;
-
- mbox_write_reg(msg & 0xffff, fifo->data);
- mbox_write_reg(msg >> 16, fifo->cmd);
-}
-
-static int omap1_mbox_fifo_empty(struct omap_mbox *mbox)
-{
- return 0;
-}
-
-static int omap1_mbox_fifo_full(struct omap_mbox *mbox)
-{
- struct omap_mbox1_fifo *fifo =
- &((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;
-
- return mbox_read_reg(fifo->flag);
-}
-
-/* irq */
-static void
-omap1_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- if (irq == IRQ_RX)
- enable_irq(mbox->irq);
-}
-
-static void
-omap1_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- if (irq == IRQ_RX)
- disable_irq(mbox->irq);
-}
-
-static int
-omap1_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- if (irq == IRQ_TX)
- return 0;
- return 1;
-}
-
-static struct omap_mbox_ops omap1_mbox_ops = {
- .type = OMAP_MBOX_TYPE1,
- .fifo_read = omap1_mbox_fifo_read,
- .fifo_write = omap1_mbox_fifo_write,
- .fifo_empty = omap1_mbox_fifo_empty,
- .fifo_full = omap1_mbox_fifo_full,
- .enable_irq = omap1_mbox_enable_irq,
- .disable_irq = omap1_mbox_disable_irq,
- .is_irq = omap1_mbox_is_irq,
-};
-
-/* FIXME: the following struct should be created automatically by the user id */
-
-/* DSP */
-static struct omap_mbox1_priv omap1_mbox_dsp_priv = {
- .tx_fifo = {
- .cmd = MAILBOX_ARM2DSP1b,
- .data = MAILBOX_ARM2DSP1,
- .flag = MAILBOX_ARM2DSP1_Flag,
- },
- .rx_fifo = {
- .cmd = MAILBOX_DSP2ARM1b,
- .data = MAILBOX_DSP2ARM1,
- .flag = MAILBOX_DSP2ARM1_Flag,
- },
-};
-
-static struct omap_mbox mbox_dsp_info = {
- .name = "dsp",
- .ops = &omap1_mbox_ops,
- .priv = &omap1_mbox_dsp_priv,
-};
-
-static struct omap_mbox *omap1_mboxes[] = { &mbox_dsp_info, NULL };
-
-static int omap1_mbox_probe(struct platform_device *pdev)
-{
- struct resource *mem;
- int ret;
- struct omap_mbox **list;
-
- list = omap1_mboxes;
- list[0]->irq = platform_get_irq_byname(pdev, "dsp");
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENOENT;
-
- mbox_base = ioremap(mem->start, resource_size(mem));
- if (!mbox_base)
- return -ENOMEM;
-
- ret = omap_mbox_register(&pdev->dev, list);
- if (ret) {
- iounmap(mbox_base);
- return ret;
- }
-
- return 0;
-}
-
-static int omap1_mbox_remove(struct platform_device *pdev)
-{
- omap_mbox_unregister();
- iounmap(mbox_base);
- return 0;
-}
-
-static struct platform_driver omap1_mbox_driver = {
- .probe = omap1_mbox_probe,
- .remove = omap1_mbox_remove,
- .driver = {
- .name = "omap-mailbox",
- },
-};
-
-static int __init omap1_mbox_init(void)
-{
- return platform_driver_register(&omap1_mbox_driver);
-}
-
-static void __exit omap1_mbox_exit(void)
-{
- platform_driver_unregister(&omap1_mbox_driver);
-}
-
-module_init(omap1_mbox_init);
-module_exit(omap1_mbox_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions");
-MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
-MODULE_ALIAS("platform:omap1-mailbox");
diff --git a/drivers/mailbox/mailbox-omap2.c b/drivers/mailbox/mailbox-omap2.c
deleted file mode 100644
index 42d2b893ea67..000000000000
--- a/drivers/mailbox/mailbox-omap2.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Mailbox reservation modules for OMAP2/3
- *
- * Copyright (C) 2006-2009 Nokia Corporation
- * Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
- * and Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/pm_runtime.h>
-#include <linux/platform_data/mailbox-omap.h>
-
-#include "omap-mbox.h"
-
-#define MAILBOX_REVISION 0x000
-#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
-#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
-#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
-#define MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
-#define MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
-
-#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
-#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
-#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
-
-#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
-#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
-
-#define MBOX_REG_SIZE 0x120
-
-#define OMAP4_MBOX_REG_SIZE 0x130
-
-#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
-#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
-
-static void __iomem *mbox_base;
-
-struct omap_mbox2_fifo {
- unsigned long msg;
- unsigned long fifo_stat;
- unsigned long msg_stat;
-};
-
-struct omap_mbox2_priv {
- struct omap_mbox2_fifo tx_fifo;
- struct omap_mbox2_fifo rx_fifo;
- unsigned long irqenable;
- unsigned long irqstatus;
- u32 newmsg_bit;
- u32 notfull_bit;
- u32 ctx[OMAP4_MBOX_NR_REGS];
- unsigned long irqdisable;
- u32 intr_type;
-};
-
-static inline unsigned int mbox_read_reg(size_t ofs)
-{
- return __raw_readl(mbox_base + ofs);
-}
-
-static inline void mbox_write_reg(u32 val, size_t ofs)
-{
- __raw_writel(val, mbox_base + ofs);
-}
-
-/* Mailbox H/W preparations */
-static int omap2_mbox_startup(struct omap_mbox *mbox)
-{
- u32 l;
-
- pm_runtime_enable(mbox->dev->parent);
- pm_runtime_get_sync(mbox->dev->parent);
-
- l = mbox_read_reg(MAILBOX_REVISION);
- pr_debug("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
-
- return 0;
-}
-
-static void omap2_mbox_shutdown(struct omap_mbox *mbox)
-{
- pm_runtime_put_sync(mbox->dev->parent);
- pm_runtime_disable(mbox->dev->parent);
-}
-
-/* Mailbox FIFO handle functions */
-static mbox_msg_t omap2_mbox_fifo_read(struct omap_mbox *mbox)
-{
- struct omap_mbox2_fifo *fifo =
- &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo;
- return (mbox_msg_t) mbox_read_reg(fifo->msg);
-}
-
-static void omap2_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
-{
- struct omap_mbox2_fifo *fifo =
- &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
- mbox_write_reg(msg, fifo->msg);
-}
-
-static int omap2_mbox_fifo_empty(struct omap_mbox *mbox)
-{
- struct omap_mbox2_fifo *fifo =
- &((struct omap_mbox2_priv *)mbox->priv)->rx_fifo;
- return (mbox_read_reg(fifo->msg_stat) == 0);
-}
-
-static int omap2_mbox_fifo_full(struct omap_mbox *mbox)
-{
- struct omap_mbox2_fifo *fifo =
- &((struct omap_mbox2_priv *)mbox->priv)->tx_fifo;
- return mbox_read_reg(fifo->fifo_stat);
-}
-
-/* Mailbox IRQ handle functions */
-static void omap2_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- struct omap_mbox2_priv *p = mbox->priv;
- u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-
- l = mbox_read_reg(p->irqenable);
- l |= bit;
- mbox_write_reg(l, p->irqenable);
-}
-
-static void omap2_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- struct omap_mbox2_priv *p = mbox->priv;
- u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-
- /*
- * Read and update the interrupt configuration register for pre-OMAP4.
- * OMAP4 and later SoCs have a dedicated interrupt disabling register.
- */
- if (!p->intr_type)
- bit = mbox_read_reg(p->irqdisable) & ~bit;
-
- mbox_write_reg(bit, p->irqdisable);
-}
-
-static void omap2_mbox_ack_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- struct omap_mbox2_priv *p = mbox->priv;
- u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-
- mbox_write_reg(bit, p->irqstatus);
-
- /* Flush posted write for irq status to avoid spurious interrupts */
- mbox_read_reg(p->irqstatus);
-}
-
-static int omap2_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
-{
- struct omap_mbox2_priv *p = mbox->priv;
- u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
- u32 enable = mbox_read_reg(p->irqenable);
- u32 status = mbox_read_reg(p->irqstatus);
-
- return (int)(enable & status & bit);
-}
-
-static void omap2_mbox_save_ctx(struct omap_mbox *mbox)
-{
- int i;
- struct omap_mbox2_priv *p = mbox->priv;
- int nr_regs;
-
- if (p->intr_type)
- nr_regs = OMAP4_MBOX_NR_REGS;
- else
- nr_regs = MBOX_NR_REGS;
- for (i = 0; i < nr_regs; i++) {
- p->ctx[i] = mbox_read_reg(i * sizeof(u32));
-
- dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
- i, p->ctx[i]);
- }
-}
-
-static void omap2_mbox_restore_ctx(struct omap_mbox *mbox)
-{
- int i;
- struct omap_mbox2_priv *p = mbox->priv;
- int nr_regs;
-
- if (p->intr_type)
- nr_regs = OMAP4_MBOX_NR_REGS;
- else
- nr_regs = MBOX_NR_REGS;
- for (i = 0; i < nr_regs; i++) {
- mbox_write_reg(p->ctx[i], i * sizeof(u32));
-
- dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
- i, p->ctx[i]);
- }
-}
-
-static struct omap_mbox_ops omap2_mbox_ops = {
- .type = OMAP_MBOX_TYPE2,
- .startup = omap2_mbox_startup,
- .shutdown = omap2_mbox_shutdown,
- .fifo_read = omap2_mbox_fifo_read,
- .fifo_write = omap2_mbox_fifo_write,
- .fifo_empty = omap2_mbox_fifo_empty,
- .fifo_full = omap2_mbox_fifo_full,
- .enable_irq = omap2_mbox_enable_irq,
- .disable_irq = omap2_mbox_disable_irq,
- .ack_irq = omap2_mbox_ack_irq,
- .is_irq = omap2_mbox_is_irq,
- .save_ctx = omap2_mbox_save_ctx,
- .restore_ctx = omap2_mbox_restore_ctx,
-};
-
-static int omap2_mbox_probe(struct platform_device *pdev)
-{
- struct resource *mem;
- int ret;
- struct omap_mbox **list, *mbox, *mboxblk;
- struct omap_mbox2_priv *priv, *privblk;
- struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
- struct omap_mbox_dev_info *info;
- int i;
-
- if (!pdata || !pdata->info_cnt || !pdata->info) {
- pr_err("%s: platform not supported\n", __func__);
- return -ENODEV;
- }
-
- /* allocate one extra for marking end of list */
- list = kzalloc((pdata->info_cnt + 1) * sizeof(*list), GFP_KERNEL);
- if (!list)
- return -ENOMEM;
-
- mboxblk = mbox = kzalloc(pdata->info_cnt * sizeof(*mbox), GFP_KERNEL);
- if (!mboxblk) {
- ret = -ENOMEM;
- goto free_list;
- }
-
- privblk = priv = kzalloc(pdata->info_cnt * sizeof(*priv), GFP_KERNEL);
- if (!privblk) {
- ret = -ENOMEM;
- goto free_mboxblk;
- }
-
- info = pdata->info;
- for (i = 0; i < pdata->info_cnt; i++, info++, priv++) {
- priv->tx_fifo.msg = MAILBOX_MESSAGE(info->tx_id);
- priv->tx_fifo.fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id);
- priv->rx_fifo.msg = MAILBOX_MESSAGE(info->rx_id);
- priv->rx_fifo.msg_stat = MAILBOX_MSGSTATUS(info->rx_id);
- priv->notfull_bit = MAILBOX_IRQ_NOTFULL(info->tx_id);
- priv->newmsg_bit = MAILBOX_IRQ_NEWMSG(info->rx_id);
- if (pdata->intr_type) {
- priv->irqenable = OMAP4_MAILBOX_IRQENABLE(info->usr_id);
- priv->irqstatus = OMAP4_MAILBOX_IRQSTATUS(info->usr_id);
- priv->irqdisable =
- OMAP4_MAILBOX_IRQENABLE_CLR(info->usr_id);
- } else {
- priv->irqenable = MAILBOX_IRQENABLE(info->usr_id);
- priv->irqstatus = MAILBOX_IRQSTATUS(info->usr_id);
- priv->irqdisable = MAILBOX_IRQENABLE(info->usr_id);
- }
- priv->intr_type = pdata->intr_type;
-
- mbox->priv = priv;
- mbox->name = info->name;
- mbox->ops = &omap2_mbox_ops;
- mbox->irq = platform_get_irq(pdev, info->irq_id);
- if (mbox->irq < 0) {
- ret = mbox->irq;
- goto free_privblk;
- }
- list[i] = mbox++;
- }
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- ret = -ENOENT;
- goto free_privblk;
- }
-
- mbox_base = ioremap(mem->start, resource_size(mem));
- if (!mbox_base) {
- ret = -ENOMEM;
- goto free_privblk;
- }
-
- ret = omap_mbox_register(&pdev->dev, list);
- if (ret)
- goto unmap_mbox;
- platform_set_drvdata(pdev, list);
-
- return 0;
-
-unmap_mbox:
- iounmap(mbox_base);
-free_privblk:
- kfree(privblk);
-free_mboxblk:
- kfree(mboxblk);
-free_list:
- kfree(list);
- return ret;
-}
-
-static int omap2_mbox_remove(struct platform_device *pdev)
-{
- struct omap_mbox2_priv *privblk;
- struct omap_mbox **list = platform_get_drvdata(pdev);
- struct omap_mbox *mboxblk = list[0];
-
- privblk = mboxblk->priv;
- omap_mbox_unregister();
- iounmap(mbox_base);
- kfree(privblk);
- kfree(mboxblk);
- kfree(list);
-
- return 0;
-}
-
-static struct platform_driver omap2_mbox_driver = {
- .probe = omap2_mbox_probe,
- .remove = omap2_mbox_remove,
- .driver = {
- .name = "omap-mailbox",
- },
-};
-
-static int __init omap2_mbox_init(void)
-{
- return platform_driver_register(&omap2_mbox_driver);
-}
-
-static void __exit omap2_mbox_exit(void)
-{
- platform_driver_unregister(&omap2_mbox_driver);
-}
-
-module_init(omap2_mbox_init);
-module_exit(omap2_mbox_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("omap mailbox: omap2/3/4 architecture specific functions");
-MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
-MODULE_AUTHOR("Paul Mundt");
-MODULE_ALIAS("platform:omap2-mailbox");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index d79a646b9042..a27e00e63a8a 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -2,8 +2,10 @@
* OMAP mailbox driver
*
* Copyright (C) 2006-2009 Nokia Corporation. All rights reserved.
+ * Copyright (C) 2013-2014 Texas Instruments Inc.
*
* Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
+ * Suman Anna <s-anna@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -24,70 +26,164 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/module.h>
-
-#include "omap-mbox.h"
-
-static struct omap_mbox **mboxes;
-
-static int mbox_configured;
-static DEFINE_MUTEX(mbox_configured_lock);
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/mailbox-omap.h>
+#include <linux/omap-mailbox.h>
+
+#define MAILBOX_REVISION 0x000
+#define MAILBOX_MESSAGE(m) (0x040 + 4 * (m))
+#define MAILBOX_FIFOSTATUS(m) (0x080 + 4 * (m))
+#define MAILBOX_MSGSTATUS(m) (0x0c0 + 4 * (m))
+
+#define OMAP2_MAILBOX_IRQSTATUS(u) (0x100 + 8 * (u))
+#define OMAP2_MAILBOX_IRQENABLE(u) (0x104 + 8 * (u))
+
+#define OMAP4_MAILBOX_IRQSTATUS(u) (0x104 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE(u) (0x108 + 0x10 * (u))
+#define OMAP4_MAILBOX_IRQENABLE_CLR(u) (0x10c + 0x10 * (u))
+
+#define MAILBOX_IRQSTATUS(type, u) (type ? OMAP4_MAILBOX_IRQSTATUS(u) : \
+ OMAP2_MAILBOX_IRQSTATUS(u))
+#define MAILBOX_IRQENABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE(u) : \
+ OMAP2_MAILBOX_IRQENABLE(u))
+#define MAILBOX_IRQDISABLE(type, u) (type ? OMAP4_MAILBOX_IRQENABLE_CLR(u) \
+ : OMAP2_MAILBOX_IRQENABLE(u))
+
+#define MAILBOX_IRQ_NEWMSG(m) (1 << (2 * (m)))
+#define MAILBOX_IRQ_NOTFULL(m) (1 << (2 * (m) + 1))
+
+#define MBOX_REG_SIZE 0x120
+
+#define OMAP4_MBOX_REG_SIZE 0x130
+
+#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
+#define OMAP4_MBOX_NR_REGS (OMAP4_MBOX_REG_SIZE / sizeof(u32))
+
+struct omap_mbox_fifo {
+ unsigned long msg;
+ unsigned long fifo_stat;
+ unsigned long msg_stat;
+ unsigned long irqenable;
+ unsigned long irqstatus;
+ unsigned long irqdisable;
+ u32 intr_bit;
+};
+
+struct omap_mbox_queue {
+ spinlock_t lock;
+ struct kfifo fifo;
+ struct work_struct work;
+ struct tasklet_struct tasklet;
+ struct omap_mbox *mbox;
+ bool full;
+};
+
+struct omap_mbox_device {
+ struct device *dev;
+ struct mutex cfg_lock;
+ void __iomem *mbox_base;
+ u32 num_users;
+ u32 num_fifos;
+ struct omap_mbox **mboxes;
+ struct list_head elem;
+};
+
+struct omap_mbox {
+ const char *name;
+ int irq;
+ struct omap_mbox_queue *txq, *rxq;
+ struct device *dev;
+ struct omap_mbox_device *parent;
+ struct omap_mbox_fifo tx_fifo;
+ struct omap_mbox_fifo rx_fifo;
+ u32 ctx[OMAP4_MBOX_NR_REGS];
+ u32 intr_type;
+ int use_count;
+ struct blocking_notifier_head notifier;
+};
+
+/* global variables for the mailbox devices */
+static DEFINE_MUTEX(omap_mbox_devices_lock);
+static LIST_HEAD(omap_mbox_devices);
static unsigned int mbox_kfifo_size = CONFIG_OMAP_MBOX_KFIFO_SIZE;
module_param(mbox_kfifo_size, uint, S_IRUGO);
MODULE_PARM_DESC(mbox_kfifo_size, "Size of omap's mailbox kfifo (bytes)");
+static inline
+unsigned int mbox_read_reg(struct omap_mbox_device *mdev, size_t ofs)
+{
+ return __raw_readl(mdev->mbox_base + ofs);
+}
+
+static inline
+void mbox_write_reg(struct omap_mbox_device *mdev, u32 val, size_t ofs)
+{
+ __raw_writel(val, mdev->mbox_base + ofs);
+}
+
/* Mailbox FIFO handle functions */
-static inline mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
+static mbox_msg_t mbox_fifo_read(struct omap_mbox *mbox)
{
- return mbox->ops->fifo_read(mbox);
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+ return (mbox_msg_t) mbox_read_reg(mbox->parent, fifo->msg);
}
-static inline void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
+
+static void mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
{
- mbox->ops->fifo_write(mbox, msg);
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+ mbox_write_reg(mbox->parent, msg, fifo->msg);
}
-static inline int mbox_fifo_empty(struct omap_mbox *mbox)
+
+static int mbox_fifo_empty(struct omap_mbox *mbox)
{
- return mbox->ops->fifo_empty(mbox);
+ struct omap_mbox_fifo *fifo = &mbox->rx_fifo;
+ return (mbox_read_reg(mbox->parent, fifo->msg_stat) == 0);
}
-static inline int mbox_fifo_full(struct omap_mbox *mbox)
+
+static int mbox_fifo_full(struct omap_mbox *mbox)
{
- return mbox->ops->fifo_full(mbox);
+ struct omap_mbox_fifo *fifo = &mbox->tx_fifo;
+ return mbox_read_reg(mbox->parent, fifo->fifo_stat);
}
/* Mailbox IRQ handle functions */
-static inline void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+static void ack_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
- if (mbox->ops->ack_irq)
- mbox->ops->ack_irq(mbox, irq);
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqstatus = fifo->irqstatus;
+
+ mbox_write_reg(mbox->parent, bit, irqstatus);
+
+ /* Flush posted write for irq status to avoid spurious interrupts */
+ mbox_read_reg(mbox->parent, irqstatus);
}
-static inline int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
+
+static int is_mbox_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
- return mbox->ops->is_irq(mbox, irq);
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+ u32 irqstatus = fifo->irqstatus;
+
+ u32 enable = mbox_read_reg(mbox->parent, irqenable);
+ u32 status = mbox_read_reg(mbox->parent, irqstatus);
+
+ return (int)(enable & status & bit);
}
/*
* message sender
*/
-static int __mbox_poll_for_space(struct omap_mbox *mbox)
-{
- int ret = 0, i = 1000;
-
- while (mbox_fifo_full(mbox)) {
- if (mbox->ops->type == OMAP_MBOX_TYPE2)
- return -1;
- if (--i == 0)
- return -1;
- udelay(1);
- }
- return ret;
-}
-
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
{
struct omap_mbox_queue *mq = mbox->txq;
@@ -100,7 +196,7 @@ int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg)
goto out;
}
- if (kfifo_is_empty(&mq->fifo) && !__mbox_poll_for_space(mbox)) {
+ if (kfifo_is_empty(&mq->fifo) && !mbox_fifo_full(mbox)) {
mbox_fifo_write(mbox, msg);
goto out;
}
@@ -118,35 +214,69 @@ EXPORT_SYMBOL(omap_mbox_msg_send);
void omap_mbox_save_ctx(struct omap_mbox *mbox)
{
- if (!mbox->ops->save_ctx) {
- dev_err(mbox->dev, "%s:\tno save\n", __func__);
- return;
- }
+ int i;
+ int nr_regs;
+
+ if (mbox->intr_type)
+ nr_regs = OMAP4_MBOX_NR_REGS;
+ else
+ nr_regs = MBOX_NR_REGS;
+ for (i = 0; i < nr_regs; i++) {
+ mbox->ctx[i] = mbox_read_reg(mbox->parent, i * sizeof(u32));
- mbox->ops->save_ctx(mbox);
+ dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
+ i, mbox->ctx[i]);
+ }
}
EXPORT_SYMBOL(omap_mbox_save_ctx);
void omap_mbox_restore_ctx(struct omap_mbox *mbox)
{
- if (!mbox->ops->restore_ctx) {
- dev_err(mbox->dev, "%s:\tno restore\n", __func__);
- return;
- }
+ int i;
+ int nr_regs;
- mbox->ops->restore_ctx(mbox);
+ if (mbox->intr_type)
+ nr_regs = OMAP4_MBOX_NR_REGS;
+ else
+ nr_regs = MBOX_NR_REGS;
+ for (i = 0; i < nr_regs; i++) {
+ mbox_write_reg(mbox->parent, mbox->ctx[i], i * sizeof(u32));
+
+ dev_dbg(mbox->dev, "%s: [%02x] %08x\n", __func__,
+ i, mbox->ctx[i]);
+ }
}
EXPORT_SYMBOL(omap_mbox_restore_ctx);
void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
- mbox->ops->enable_irq(mbox, irq);
+ u32 l;
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqenable = fifo->irqenable;
+
+ l = mbox_read_reg(mbox->parent, irqenable);
+ l |= bit;
+ mbox_write_reg(mbox->parent, l, irqenable);
}
EXPORT_SYMBOL(omap_mbox_enable_irq);
void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq)
{
- mbox->ops->disable_irq(mbox, irq);
+ struct omap_mbox_fifo *fifo = (irq == IRQ_TX) ?
+ &mbox->tx_fifo : &mbox->rx_fifo;
+ u32 bit = fifo->intr_bit;
+ u32 irqdisable = fifo->irqdisable;
+
+ /*
+ * Read and update the interrupt configuration register for pre-OMAP4.
+ * OMAP4 and later SoCs have a dedicated interrupt disabling register.
+ */
+ if (!mbox->intr_type)
+ bit = mbox_read_reg(mbox->parent, irqdisable) & ~bit;
+
+ mbox_write_reg(mbox->parent, bit, irqdisable);
}
EXPORT_SYMBOL(omap_mbox_disable_irq);
@@ -158,7 +288,7 @@ static void mbox_tx_tasklet(unsigned long tx_data)
int ret;
while (kfifo_len(&mq->fifo)) {
- if (__mbox_poll_for_space(mbox)) {
+ if (mbox_fifo_full(mbox)) {
omap_mbox_enable_irq(mbox, IRQ_TX);
break;
}
@@ -223,9 +353,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
WARN_ON(len != sizeof(msg));
-
- if (mbox->ops->type == OMAP_MBOX_TYPE1)
- break;
}
/* no more messages in the fifo. clear IRQ source. */
@@ -283,16 +410,12 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
{
int ret = 0;
struct omap_mbox_queue *mq;
+ struct omap_mbox_device *mdev = mbox->parent;
- mutex_lock(&mbox_configured_lock);
- if (!mbox_configured++) {
- if (likely(mbox->ops->startup)) {
- ret = mbox->ops->startup(mbox);
- if (unlikely(ret))
- goto fail_startup;
- } else
- goto fail_startup;
- }
+ mutex_lock(&mdev->cfg_lock);
+ ret = pm_runtime_get_sync(mdev->dev);
+ if (unlikely(ret < 0))
+ goto fail_startup;
if (!mbox->use_count++) {
mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
@@ -319,7 +442,7 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
omap_mbox_enable_irq(mbox, IRQ_RX);
}
- mutex_unlock(&mbox_configured_lock);
+ mutex_unlock(&mdev->cfg_lock);
return 0;
fail_request_irq:
@@ -327,18 +450,18 @@ fail_request_irq:
fail_alloc_rxq:
mbox_queue_free(mbox->txq);
fail_alloc_txq:
- if (mbox->ops->shutdown)
- mbox->ops->shutdown(mbox);
+ pm_runtime_put_sync(mdev->dev);
mbox->use_count--;
fail_startup:
- mbox_configured--;
- mutex_unlock(&mbox_configured_lock);
+ mutex_unlock(&mdev->cfg_lock);
return ret;
}
static void omap_mbox_fini(struct omap_mbox *mbox)
{
- mutex_lock(&mbox_configured_lock);
+ struct omap_mbox_device *mdev = mbox->parent;
+
+ mutex_lock(&mdev->cfg_lock);
if (!--mbox->use_count) {
omap_mbox_disable_irq(mbox, IRQ_RX);
@@ -349,28 +472,43 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
mbox_queue_free(mbox->rxq);
}
- if (likely(mbox->ops->shutdown)) {
- if (!--mbox_configured)
- mbox->ops->shutdown(mbox);
- }
+ pm_runtime_put_sync(mdev->dev);
- mutex_unlock(&mbox_configured_lock);
+ mutex_unlock(&mdev->cfg_lock);
}
-struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
+static struct omap_mbox *omap_mbox_device_find(struct omap_mbox_device *mdev,
+ const char *mbox_name)
{
struct omap_mbox *_mbox, *mbox = NULL;
- int i, ret;
+ struct omap_mbox **mboxes = mdev->mboxes;
+ int i;
if (!mboxes)
- return ERR_PTR(-EINVAL);
+ return NULL;
for (i = 0; (_mbox = mboxes[i]); i++) {
- if (!strcmp(_mbox->name, name)) {
+ if (!strcmp(_mbox->name, mbox_name)) {
mbox = _mbox;
break;
}
}
+ return mbox;
+}
+
+struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
+{
+ struct omap_mbox *mbox = NULL;
+ struct omap_mbox_device *mdev;
+ int ret;
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_for_each_entry(mdev, &omap_mbox_devices, elem) {
+ mbox = omap_mbox_device_find(mdev, name);
+ if (mbox)
+ break;
+ }
+ mutex_unlock(&omap_mbox_devices_lock);
if (!mbox)
return ERR_PTR(-ENOENT);
@@ -397,19 +535,20 @@ EXPORT_SYMBOL(omap_mbox_put);
static struct class omap_mbox_class = { .name = "mbox", };
-int omap_mbox_register(struct device *parent, struct omap_mbox **list)
+static int omap_mbox_register(struct omap_mbox_device *mdev)
{
int ret;
int i;
+ struct omap_mbox **mboxes;
- mboxes = list;
- if (!mboxes)
+ if (!mdev || !mdev->mboxes)
return -EINVAL;
+ mboxes = mdev->mboxes;
for (i = 0; mboxes[i]; i++) {
struct omap_mbox *mbox = mboxes[i];
mbox->dev = device_create(&omap_mbox_class,
- parent, 0, mbox, "%s", mbox->name);
+ mdev->dev, 0, mbox, "%s", mbox->name);
if (IS_ERR(mbox->dev)) {
ret = PTR_ERR(mbox->dev);
goto err_out;
@@ -417,6 +556,11 @@ int omap_mbox_register(struct device *parent, struct omap_mbox **list)
BLOCKING_INIT_NOTIFIER_HEAD(&mbox->notifier);
}
+
+ mutex_lock(&omap_mbox_devices_lock);
+ list_add(&mdev->elem, &omap_mbox_devices);
+ mutex_unlock(&omap_mbox_devices_lock);
+
return 0;
err_out:
@@ -424,21 +568,148 @@ err_out:
device_unregister(mboxes[i]->dev);
return ret;
}
-EXPORT_SYMBOL(omap_mbox_register);
-int omap_mbox_unregister(void)
+static int omap_mbox_unregister(struct omap_mbox_device *mdev)
{
int i;
+ struct omap_mbox **mboxes;
- if (!mboxes)
+ if (!mdev || !mdev->mboxes)
return -EINVAL;
+ mutex_lock(&omap_mbox_devices_lock);
+ list_del(&mdev->elem);
+ mutex_unlock(&omap_mbox_devices_lock);
+
+ mboxes = mdev->mboxes;
for (i = 0; mboxes[i]; i++)
device_unregister(mboxes[i]->dev);
- mboxes = NULL;
return 0;
}
-EXPORT_SYMBOL(omap_mbox_unregister);
+
+static int omap_mbox_probe(struct platform_device *pdev)
+{
+ struct resource *mem;
+ int ret;
+ struct omap_mbox **list, *mbox, *mboxblk;
+ struct omap_mbox_pdata *pdata = pdev->dev.platform_data;
+ struct omap_mbox_dev_info *info;
+ struct omap_mbox_device *mdev;
+ struct omap_mbox_fifo *fifo;
+ u32 intr_type;
+ u32 l;
+ int i;
+
+ if (!pdata || !pdata->info_cnt || !pdata->info) {
+ pr_err("%s: platform not supported\n", __func__);
+ return -ENODEV;
+ }
+
+ mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(mdev->mbox_base))
+ return PTR_ERR(mdev->mbox_base);
+
+ /* allocate one extra for marking end of list */
+ list = devm_kzalloc(&pdev->dev, (pdata->info_cnt + 1) * sizeof(*list),
+ GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ mboxblk = devm_kzalloc(&pdev->dev, pdata->info_cnt * sizeof(*mbox),
+ GFP_KERNEL);
+ if (!mboxblk)
+ return -ENOMEM;
+
+ info = pdata->info;
+ intr_type = pdata->intr_type;
+ mbox = mboxblk;
+ for (i = 0; i < pdata->info_cnt; i++, info++) {
+ fifo = &mbox->tx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(info->tx_id);
+ fifo->fifo_stat = MAILBOX_FIFOSTATUS(info->tx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NOTFULL(info->tx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id);
+
+ fifo = &mbox->rx_fifo;
+ fifo->msg = MAILBOX_MESSAGE(info->rx_id);
+ fifo->msg_stat = MAILBOX_MSGSTATUS(info->rx_id);
+ fifo->intr_bit = MAILBOX_IRQ_NEWMSG(info->rx_id);
+ fifo->irqenable = MAILBOX_IRQENABLE(intr_type, info->usr_id);
+ fifo->irqstatus = MAILBOX_IRQSTATUS(intr_type, info->usr_id);
+ fifo->irqdisable = MAILBOX_IRQDISABLE(intr_type, info->usr_id);
+
+ mbox->intr_type = intr_type;
+
+ mbox->parent = mdev;
+ mbox->name = info->name;
+ mbox->irq = platform_get_irq(pdev, info->irq_id);
+ if (mbox->irq < 0)
+ return mbox->irq;
+ list[i] = mbox++;
+ }
+
+ mutex_init(&mdev->cfg_lock);
+ mdev->dev = &pdev->dev;
+ mdev->num_users = pdata->num_users;
+ mdev->num_fifos = pdata->num_fifos;
+ mdev->mboxes = list;
+ ret = omap_mbox_register(mdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, mdev);
+ pm_runtime_enable(mdev->dev);
+
+ ret = pm_runtime_get_sync(mdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(mdev->dev);
+ goto unregister;
+ }
+
+ /*
+	 * just print the raw revision register; the format is not
+	 * uniform across all SoCs
+ */
+ l = mbox_read_reg(mdev, MAILBOX_REVISION);
+ dev_info(mdev->dev, "omap mailbox rev 0x%x\n", l);
+
+ ret = pm_runtime_put_sync(mdev->dev);
+ if (ret < 0)
+ goto unregister;
+
+ return 0;
+
+unregister:
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+ return ret;
+}
+
+static int omap_mbox_remove(struct platform_device *pdev)
+{
+ struct omap_mbox_device *mdev = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(mdev->dev);
+ omap_mbox_unregister(mdev);
+
+ return 0;
+}
+
+static struct platform_driver omap_mbox_driver = {
+ .probe = omap_mbox_probe,
+ .remove = omap_mbox_remove,
+ .driver = {
+ .name = "omap-mailbox",
+ .owner = THIS_MODULE,
+ },
+};
static int __init omap_mbox_init(void)
{
@@ -453,12 +724,13 @@ static int __init omap_mbox_init(void)
mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
sizeof(mbox_msg_t));
- return 0;
+ return platform_driver_register(&omap_mbox_driver);
}
subsys_initcall(omap_mbox_init);
static void __exit omap_mbox_exit(void)
{
+ platform_driver_unregister(&omap_mbox_driver);
class_unregister(&omap_mbox_class);
}
module_exit(omap_mbox_exit);
diff --git a/drivers/mailbox/omap-mbox.h b/drivers/mailbox/omap-mbox.h
deleted file mode 100644
index 86d7518cd13b..000000000000
--- a/drivers/mailbox/omap-mbox.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * omap-mbox.h: OMAP mailbox internal definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef OMAP_MBOX_H
-#define OMAP_MBOX_H
-
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/kfifo.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/omap-mailbox.h>
-
-typedef int __bitwise omap_mbox_type_t;
-#define OMAP_MBOX_TYPE1 ((__force omap_mbox_type_t) 1)
-#define OMAP_MBOX_TYPE2 ((__force omap_mbox_type_t) 2)
-
-struct omap_mbox_ops {
- omap_mbox_type_t type;
- int (*startup)(struct omap_mbox *mbox);
- void (*shutdown)(struct omap_mbox *mbox);
- /* fifo */
- mbox_msg_t (*fifo_read)(struct omap_mbox *mbox);
- void (*fifo_write)(struct omap_mbox *mbox, mbox_msg_t msg);
- int (*fifo_empty)(struct omap_mbox *mbox);
- int (*fifo_full)(struct omap_mbox *mbox);
- /* irq */
- void (*enable_irq)(struct omap_mbox *mbox,
- omap_mbox_irq_t irq);
- void (*disable_irq)(struct omap_mbox *mbox,
- omap_mbox_irq_t irq);
- void (*ack_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
- int (*is_irq)(struct omap_mbox *mbox, omap_mbox_irq_t irq);
- /* ctx */
- void (*save_ctx)(struct omap_mbox *mbox);
- void (*restore_ctx)(struct omap_mbox *mbox);
-};
-
-struct omap_mbox_queue {
- spinlock_t lock;
- struct kfifo fifo;
- struct work_struct work;
- struct tasklet_struct tasklet;
- struct omap_mbox *mbox;
- bool full;
-};
-
-struct omap_mbox {
- const char *name;
- int irq;
- struct omap_mbox_queue *txq, *rxq;
- struct omap_mbox_ops *ops;
- struct device *dev;
- void *priv;
- int use_count;
- struct blocking_notifier_head notifier;
-};
-
-int omap_mbox_register(struct device *parent, struct omap_mbox **);
-int omap_mbox_unregister(void);
-
-#endif /* OMAP_MBOX_H */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 32fc19c540d4..1294238610df 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5961,7 +5961,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
int err = 0;
if (mddev->pers) {
- if (!mddev->pers->quiesce)
+ if (!mddev->pers->quiesce || !mddev->thread)
return -EBUSY;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
@@ -6263,7 +6263,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
rv = update_raid_disks(mddev, info->raid_disks);
if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) {
- if (mddev->pers->quiesce == NULL)
+ if (mddev->pers->quiesce == NULL || mddev->thread == NULL)
return -EINVAL;
if (mddev->recovery || mddev->sync_thread)
return -EBUSY;
@@ -7376,7 +7376,7 @@ void md_do_sync(struct md_thread *thread)
struct mddev *mddev2;
unsigned int currspeed = 0,
window;
- sector_t max_sectors,j, io_sectors;
+ sector_t max_sectors,j, io_sectors, recovery_done;
unsigned long mark[SYNC_MARKS];
unsigned long update_time;
sector_t mark_cnt[SYNC_MARKS];
@@ -7652,7 +7652,8 @@ void md_do_sync(struct md_thread *thread)
*/
cond_resched();
- currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
+ recovery_done = io_sectors - atomic_read(&mddev->recovery_active);
+ currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2
/((jiffies-mddev->resync_mark)/HZ +1) +1;
if (currspeed > speed_min(mddev)) {
@@ -8592,7 +8593,7 @@ static int __init md_init(void)
goto err_mdp;
mdp_major = ret;
- blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
+ blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
md_probe, NULL, NULL);
blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
md_probe, NULL, NULL);
@@ -8687,7 +8688,7 @@ static __exit void md_exit(void)
struct list_head *tmp;
int delay = 1;
- blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
+ blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
unregister_blkdev(MD_MAJOR,"md");
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 407a99e46f69..cf91f5910c7c 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -685,6 +685,12 @@ static void *raid0_takeover(struct mddev *mddev)
* raid10 - assuming we have all necessary active disks
* raid1 - with (N -1) mirror drives faulty
*/
+
+ if (mddev->bitmap) {
+ printk(KERN_ERR "md/raid0: %s: cannot takeover array with bitmap\n",
+ mdname(mddev));
+ return ERR_PTR(-EBUSY);
+ }
if (mddev->level == 4)
return raid0_takeover_raid45(mddev);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 56e24c072b62..d7690f86fdb9 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1501,12 +1501,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
mddev->degraded++;
set_bit(Faulty, &rdev->flags);
spin_unlock_irqrestore(&conf->device_lock, flags);
- /*
- * if recovery is running, make sure it aborts.
- */
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
} else
set_bit(Faulty, &rdev->flags);
+ /*
+ * if recovery is running, make sure it aborts.
+ */
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
printk(KERN_ALERT
"md/raid1:%s: Disk failure on %s, disabling device.\n"
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index cb882aae9e20..b08c18871323 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1684,13 +1684,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
spin_unlock_irqrestore(&conf->device_lock, flags);
return;
}
- if (test_and_clear_bit(In_sync, &rdev->flags)) {
+ if (test_and_clear_bit(In_sync, &rdev->flags))
mddev->degraded++;
- /*
- * if recovery is running, make sure it aborts.
- */
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- }
+ /*
+ * If recovery is running, make sure it aborts.
+ */
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
index 34b0d0ddeef3..97afee672d07 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/media/common/saa7146/saa7146_core.c
@@ -421,23 +421,20 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
err = -ENOMEM;
/* get memory for various stuff */
- dev->d_rps0.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM,
- &dev->d_rps0.dma_handle);
+ dev->d_rps0.cpu_addr = pci_zalloc_consistent(pci, SAA7146_RPS_MEM,
+ &dev->d_rps0.dma_handle);
if (!dev->d_rps0.cpu_addr)
goto err_free_irq;
- memset(dev->d_rps0.cpu_addr, 0x0, SAA7146_RPS_MEM);
- dev->d_rps1.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM,
- &dev->d_rps1.dma_handle);
+ dev->d_rps1.cpu_addr = pci_zalloc_consistent(pci, SAA7146_RPS_MEM,
+ &dev->d_rps1.dma_handle);
if (!dev->d_rps1.cpu_addr)
goto err_free_rps0;
- memset(dev->d_rps1.cpu_addr, 0x0, SAA7146_RPS_MEM);
- dev->d_i2c.cpu_addr = pci_alloc_consistent(pci, SAA7146_RPS_MEM,
- &dev->d_i2c.dma_handle);
+ dev->d_i2c.cpu_addr = pci_zalloc_consistent(pci, SAA7146_RPS_MEM,
+ &dev->d_i2c.dma_handle);
if (!dev->d_i2c.cpu_addr)
goto err_free_rps1;
- memset(dev->d_i2c.cpu_addr, 0x0, SAA7146_RPS_MEM);
/* the rest + print status message */
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index d9e1d6395ed9..6c47f3fe9b0f 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -520,14 +520,15 @@ int saa7146_vv_init(struct saa7146_dev* dev, struct saa7146_ext_vv *ext_vv)
configuration data) */
dev->ext_vv_data = ext_vv;
- vv->d_clipping.cpu_addr = pci_alloc_consistent(dev->pci, SAA7146_CLIPPING_MEM, &vv->d_clipping.dma_handle);
+ vv->d_clipping.cpu_addr =
+ pci_zalloc_consistent(dev->pci, SAA7146_CLIPPING_MEM,
+ &vv->d_clipping.dma_handle);
if( NULL == vv->d_clipping.cpu_addr ) {
ERR("out of memory. aborting.\n");
kfree(vv);
v4l2_ctrl_handler_free(hdl);
return -1;
}
- memset(vv->d_clipping.cpu_addr, 0x0, SAA7146_CLIPPING_MEM);
saa7146_video_uops.init(dev,vv);
if (dev->ext_vv_data->capabilities & V4L2_CAP_VBI_CAPTURE)
diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
index d0c281f41a0a..11765835d7b2 100644
--- a/drivers/media/pci/bt8xx/bt878.c
+++ b/drivers/media/pci/bt8xx/bt878.c
@@ -101,28 +101,20 @@ static int bt878_mem_alloc(struct bt878 *bt)
if (!bt->buf_cpu) {
bt->buf_size = 128 * 1024;
- bt->buf_cpu =
- pci_alloc_consistent(bt->dev, bt->buf_size,
- &bt->buf_dma);
-
+ bt->buf_cpu = pci_zalloc_consistent(bt->dev, bt->buf_size,
+ &bt->buf_dma);
if (!bt->buf_cpu)
return -ENOMEM;
-
- memset(bt->buf_cpu, 0, bt->buf_size);
}
if (!bt->risc_cpu) {
bt->risc_size = PAGE_SIZE;
- bt->risc_cpu =
- pci_alloc_consistent(bt->dev, bt->risc_size,
- &bt->risc_dma);
-
+ bt->risc_cpu = pci_zalloc_consistent(bt->dev, bt->risc_size,
+ &bt->risc_dma);
if (!bt->risc_cpu) {
bt878_mem_free(bt);
return -ENOMEM;
}
-
- memset(bt->risc_cpu, 0, bt->risc_size);
}
return 0;
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 826228c3800e..4930b55fd5f4 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -1075,12 +1075,11 @@ static int AllocCommonBuffers(struct ngene *dev)
dev->ngenetohost = dev->FWInterfaceBuffer + 256;
dev->EventBuffer = dev->FWInterfaceBuffer + 512;
- dev->OverflowBuffer = pci_alloc_consistent(dev->pci_dev,
- OVERFLOW_BUFFER_SIZE,
- &dev->PAOverflowBuffer);
+ dev->OverflowBuffer = pci_zalloc_consistent(dev->pci_dev,
+ OVERFLOW_BUFFER_SIZE,
+ &dev->PAOverflowBuffer);
if (!dev->OverflowBuffer)
return -ENOMEM;
- memset(dev->OverflowBuffer, 0, OVERFLOW_BUFFER_SIZE);
for (i = STREAM_VIDEOIN1; i < MAX_STREAM; i++) {
int type = dev->card_info->io_type[i];
diff --git a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
index f166ffc9800a..cef7a00099ea 100644
--- a/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c
@@ -803,11 +803,9 @@ static int ttusb_alloc_iso_urbs(struct ttusb *ttusb)
{
int i;
- ttusb->iso_buffer = pci_alloc_consistent(NULL,
- ISO_FRAME_SIZE *
- FRAMES_PER_ISO_BUF *
- ISO_BUF_COUNT,
- &ttusb->iso_dma_handle);
+ ttusb->iso_buffer = pci_zalloc_consistent(NULL,
+ ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF * ISO_BUF_COUNT,
+ &ttusb->iso_dma_handle);
if (!ttusb->iso_buffer) {
dprintk("%s: pci_alloc_consistent - not enough memory\n",
@@ -815,9 +813,6 @@ static int ttusb_alloc_iso_urbs(struct ttusb *ttusb)
return -ENOMEM;
}
- memset(ttusb->iso_buffer, 0,
- ISO_FRAME_SIZE * FRAMES_PER_ISO_BUF * ISO_BUF_COUNT);
-
for (i = 0; i < ISO_BUF_COUNT; i++) {
struct urb *urb;
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index 29724af9b9ab..15ab584cf265 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -1151,11 +1151,9 @@ static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec)
dprintk("%s\n", __func__);
- dec->iso_buffer = pci_alloc_consistent(NULL,
- ISO_FRAME_SIZE *
- (FRAMES_PER_ISO_BUF *
- ISO_BUF_COUNT),
- &dec->iso_dma_handle);
+ dec->iso_buffer = pci_zalloc_consistent(NULL,
+ ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF * ISO_BUF_COUNT),
+ &dec->iso_dma_handle);
if (!dec->iso_buffer) {
dprintk("%s: pci_alloc_consistent - not enough memory\n",
@@ -1163,9 +1161,6 @@ static int ttusb_dec_alloc_iso_urbs(struct ttusb_dec *dec)
return -ENOMEM;
}
- memset(dec->iso_buffer, 0,
- ISO_FRAME_SIZE * (FRAMES_PER_ISO_BUF * ISO_BUF_COUNT));
-
for (i = 0; i < ISO_BUF_COUNT; i++) {
struct urb *urb;
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index c59e9c96e86d..fab81a143bd7 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -61,6 +61,16 @@ config TEGRA30_MC
analysis, especially for IOMMU/SMMU(System Memory Management
Unit) module.
+config FSL_CORENET_CF
+ tristate "Freescale CoreNet Error Reporting"
+ depends on FSL_SOC_BOOKE
+ help
+ Say Y for reporting of errors from the Freescale CoreNet
+ Coherency Fabric. Errors reported include accesses to
+	  physical addresses that are mapped by no local access window
+	  (LAW) or by an invalid LAW, as well as bad cache state that
+ represents a coherency violation.
+
config FSL_IFC
bool
depends on FSL_SOC
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 71160a2b7313..4055c47f45ab 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_OF) += of_memory.o
endif
obj-$(CONFIG_TI_AEMIF) += ti-aemif.o
obj-$(CONFIG_TI_EMIF) += emif.o
+obj-$(CONFIG_FSL_CORENET_CF) += fsl-corenet-cf.o
obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
obj-$(CONFIG_MVEBU_DEVBUS) += mvebu-devbus.o
obj-$(CONFIG_TEGRA20_MC) += tegra20-mc.o
diff --git a/drivers/memory/fsl-corenet-cf.c b/drivers/memory/fsl-corenet-cf.c
new file mode 100644
index 000000000000..c9443fc136db
--- /dev/null
+++ b/drivers/memory/fsl-corenet-cf.c
@@ -0,0 +1,251 @@
+/*
+ * CoreNet Coherency Fabric error reporting
+ *
+ * Copyright 2014 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+
+enum ccf_version {
+ CCF1,
+ CCF2,
+};
+
+struct ccf_info {
+ enum ccf_version version;
+ int err_reg_offs;
+};
+
+static const struct ccf_info ccf1_info = {
+ .version = CCF1,
+ .err_reg_offs = 0xa00,
+};
+
+static const struct ccf_info ccf2_info = {
+ .version = CCF2,
+ .err_reg_offs = 0xe40,
+};
+
+static const struct of_device_id ccf_matches[] = {
+ {
+ .compatible = "fsl,corenet1-cf",
+ .data = &ccf1_info,
+ },
+ {
+ .compatible = "fsl,corenet2-cf",
+ .data = &ccf2_info,
+ },
+ {}
+};
+
+struct ccf_err_regs {
+ u32 errdet; /* 0x00 Error Detect Register */
+ /* 0x04 Error Enable (ccf1)/Disable (ccf2) Register */
+ u32 errdis;
+ /* 0x08 Error Interrupt Enable Register (ccf2 only) */
+ u32 errinten;
+ u32 cecar; /* 0x0c Error Capture Attribute Register */
+ u32 cecaddrh; /* 0x10 Error Capture Address High */
+ u32 cecaddrl; /* 0x14 Error Capture Address Low */
+ u32 cecar2; /* 0x18 Error Capture Attribute Register 2 */
+};
+
+/* LAE/CV also valid for errdis and errinten */
+#define ERRDET_LAE (1 << 0) /* Local Access Error */
+#define ERRDET_CV (1 << 1) /* Coherency Violation */
+#define ERRDET_CTYPE_SHIFT 26 /* Capture Type (ccf2 only) */
+#define ERRDET_CTYPE_MASK (0x1f << ERRDET_CTYPE_SHIFT)
+#define ERRDET_CAP (1 << 31) /* Capture Valid (ccf2 only) */
+
+#define CECAR_VAL (1 << 0) /* Valid (ccf1 only) */
+#define CECAR_UVT (1 << 15) /* Unavailable target ID (ccf1) */
+#define CECAR_SRCID_SHIFT_CCF1 24
+#define CECAR_SRCID_MASK_CCF1 (0xff << CECAR_SRCID_SHIFT_CCF1)
+#define CECAR_SRCID_SHIFT_CCF2 18
+#define CECAR_SRCID_MASK_CCF2 (0xff << CECAR_SRCID_SHIFT_CCF2)
+
+#define CECADDRH_ADDRH 0xff
+
+struct ccf_private {
+ const struct ccf_info *info;
+ struct device *dev;
+ void __iomem *regs;
+ struct ccf_err_regs __iomem *err_regs;
+};
+
+static irqreturn_t ccf_irq(int irq, void *dev_id)
+{
+ struct ccf_private *ccf = dev_id;
+ static DEFINE_RATELIMIT_STATE(ratelimit, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ u32 errdet, cecar, cecar2;
+ u64 addr;
+ u32 src_id;
+ bool uvt = false;
+ bool cap_valid = false;
+
+ errdet = ioread32be(&ccf->err_regs->errdet);
+ cecar = ioread32be(&ccf->err_regs->cecar);
+ cecar2 = ioread32be(&ccf->err_regs->cecar2);
+ addr = ioread32be(&ccf->err_regs->cecaddrl);
+ addr |= ((u64)(ioread32be(&ccf->err_regs->cecaddrh) &
+ CECADDRH_ADDRH)) << 32;
+
+ if (!__ratelimit(&ratelimit))
+ goto out;
+
+ switch (ccf->info->version) {
+ case CCF1:
+ if (cecar & CECAR_VAL) {
+ if (cecar & CECAR_UVT)
+ uvt = true;
+
+ src_id = (cecar & CECAR_SRCID_MASK_CCF1) >>
+ CECAR_SRCID_SHIFT_CCF1;
+ cap_valid = true;
+ }
+
+ break;
+ case CCF2:
+ if (errdet & ERRDET_CAP) {
+ src_id = (cecar & CECAR_SRCID_MASK_CCF2) >>
+ CECAR_SRCID_SHIFT_CCF2;
+ cap_valid = true;
+ }
+
+ break;
+ }
+
+ dev_crit(ccf->dev, "errdet 0x%08x cecar 0x%08x cecar2 0x%08x\n",
+ errdet, cecar, cecar2);
+
+ if (errdet & ERRDET_LAE) {
+ if (uvt)
+ dev_crit(ccf->dev, "LAW Unavailable Target ID\n");
+ else
+ dev_crit(ccf->dev, "Local Access Window Error\n");
+ }
+
+ if (errdet & ERRDET_CV)
+ dev_crit(ccf->dev, "Coherency Violation\n");
+
+ if (cap_valid) {
+ dev_crit(ccf->dev, "address 0x%09llx, src id 0x%x\n",
+ addr, src_id);
+ }
+
+out:
+ iowrite32be(errdet, &ccf->err_regs->errdet);
+ return errdet ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int ccf_probe(struct platform_device *pdev)
+{
+ struct ccf_private *ccf;
+ struct resource *r;
+ const struct of_device_id *match;
+ int ret, irq;
+
+ match = of_match_device(ccf_matches, &pdev->dev);
+ if (WARN_ON(!match))
+ return -ENODEV;
+
+ ccf = devm_kzalloc(&pdev->dev, sizeof(*ccf), GFP_KERNEL);
+ if (!ccf)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
+ return -ENXIO;
+ }
+
+ ccf->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ccf->regs)) {
+ dev_err(&pdev->dev, "%s: can't map mem resource\n", __func__);
+ return PTR_ERR(ccf->regs);
+ }
+
+ ccf->dev = &pdev->dev;
+ ccf->info = match->data;
+ ccf->err_regs = ccf->regs + ccf->info->err_reg_offs;
+
+ dev_set_drvdata(&pdev->dev, ccf);
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "%s: no irq\n", __func__);
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, ccf_irq, 0, pdev->name, ccf);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: can't request irq\n", __func__);
+ return ret;
+ }
+
+ switch (ccf->info->version) {
+ case CCF1:
+ /* On CCF1 this register enables rather than disables. */
+ iowrite32be(ERRDET_LAE | ERRDET_CV, &ccf->err_regs->errdis);
+ break;
+
+ case CCF2:
+ iowrite32be(0, &ccf->err_regs->errdis);
+ iowrite32be(ERRDET_LAE | ERRDET_CV, &ccf->err_regs->errinten);
+ break;
+ }
+
+ return 0;
+}
+
+static int ccf_remove(struct platform_device *pdev)
+{
+ struct ccf_private *ccf = dev_get_drvdata(&pdev->dev);
+
+ switch (ccf->info->version) {
+ case CCF1:
+ iowrite32be(0, &ccf->err_regs->errdis);
+ break;
+
+ case CCF2:
+ /*
+ * We clear errdis on ccf1 because that's the only way to
+ * disable interrupts, but on ccf2 there's no need to disable
+ * detection.
+ */
+ iowrite32be(0, &ccf->err_regs->errinten);
+ break;
+ }
+
+ return 0;
+}
+
+static struct platform_driver ccf_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ccf_matches,
+ },
+ .probe = ccf_probe,
+ .remove = ccf_remove,
+};
+
+module_platform_driver(ccf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale CoreNet Coherency Fabric error reporting");
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index ebc0af7d769c..a896d948b79e 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -649,12 +649,10 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
case MPI_FUNCTION_CONFIG:
case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
- if (reply) {
- ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
- memcpy(ioc->mptbase_cmds.reply, reply,
- min(MPT_DEFAULT_FRAME_SIZE,
- 4 * reply->u.reply.MsgLength));
- }
+ ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->mptbase_cmds.reply, reply,
+ min(MPT_DEFAULT_FRAME_SIZE,
+ 4 * reply->u.reply.MsgLength));
if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
complete(&ioc->mptbase_cmds.done);
@@ -1408,8 +1406,8 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
* in /proc/mpt/summary and /sysfs/class/scsi_host/host<X>/version_product
*
**/
-static void
-mpt_get_product_name(u16 vendor, u16 device, u8 revision, char *prod_name)
+static const char*
+mpt_get_product_name(u16 vendor, u16 device, u8 revision)
{
char *product_str = NULL;
@@ -1635,8 +1633,7 @@ mpt_get_product_name(u16 vendor, u16 device, u8 revision, char *prod_name)
}
out:
- if (product_str)
- sprintf(prod_name, "%s", product_str);
+ return product_str;
}
/**
@@ -1887,8 +1884,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
ioc->name, &ioc->facts, &ioc->pfacts[0]));
- mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
- ioc->prod_name);
+ ioc->prod_name = mpt_get_product_name(pdev->vendor, pdev->device,
+ pdev->revision);
switch (pdev->device)
{
@@ -7007,7 +7004,7 @@ EXPORT_SYMBOL(mpt_halt_firmware);
* IOC doesn't reply to any outstanding request. This will transfer IOC
* to READY state.
**/
-int
+static int
mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
{
int rc;
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 76c05bc24cb7..8f14090b8b71 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -405,7 +405,7 @@ typedef struct _VirtTarget {
typedef struct _VirtDevice {
VirtTarget *vtarget;
u8 configured_lun;
- int lun;
+ u64 lun;
} VirtDevice;
/*
@@ -605,7 +605,7 @@ typedef struct _MPT_ADAPTER
int id; /* Unique adapter id N {0,1,2,...} */
int pci_irq; /* This irq */
char name[MPT_NAME_LENGTH]; /* "iocN" */
- char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
+ const char *prod_name; /* "LSIFC9x9" */
#ifdef CONFIG_FUSION_LOGGING
/* used in mpt_display_event_info */
char evStr[EVENT_DESCR_STR_SZ];
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 8a050e885688..b0a892a2bf1b 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1261,19 +1261,11 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
else
return -EFAULT;
- karg = kmalloc(data_size, GFP_KERNEL);
- if (karg == NULL) {
- printk(KERN_ERR MYNAM "%s::mpt_ioctl_iocinfo() @%d - no memory available!\n",
- __FILE__, __LINE__);
- return -ENOMEM;
- }
-
- if (copy_from_user(karg, uarg, data_size)) {
- printk(KERN_ERR MYNAM "%s@%d::mptctl_getiocinfo - "
- "Unable to read in mpt_ioctl_iocinfo struct @ %p\n",
- __FILE__, __LINE__, uarg);
- kfree(karg);
- return -EFAULT;
+ karg = memdup_user(uarg, data_size);
+ if (IS_ERR(karg)) {
+ printk(KERN_ERR MYNAM "%s@%d::mpt_ioctl_iocinfo() - memdup_user returned error [%ld]\n",
+ __FILE__, __LINE__, PTR_ERR(karg));
+ return PTR_ERR(karg);
}
if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 02a3eefd6931..d8bf84aef602 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -204,7 +204,7 @@ mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
|| (loops > 0 && ioc->active == 0)) {
spin_unlock_irqrestore(shost->host_lock, flags);
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
- "mptfc_block_error_handler.%d: %d:%d, port status is "
+ "mptfc_block_error_handler.%d: %d:%llu, port status is "
"%x, active flag %d, deferring %s recovery.\n",
ioc->name, ioc->sh->host_no,
SCpnt->device->id, SCpnt->device->lun,
@@ -218,7 +218,7 @@ mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata
|| ioc->active == 0) {
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
- "%s.%d: %d:%d, failing recovery, "
+ "%s.%d: %d:%llu, failing recovery, "
"port state %x, active %d, vdevice %p.\n", caller,
ioc->name, ioc->sh->host_no,
SCpnt->device->id, SCpnt->device->lun, ready,
@@ -226,7 +226,7 @@ mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
return FAILED;
}
dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
- "%s.%d: %d:%d, executing recovery.\n", caller,
+ "%s.%d: %d:%llu, executing recovery.\n", caller,
ioc->name, ioc->sh->host_no,
SCpnt->device->id, SCpnt->device->lun));
return (*func)(SCpnt);
@@ -525,8 +525,7 @@ mptfc_target_destroy(struct scsi_target *starget)
if (ri) /* better be! */
ri->starget = NULL;
}
- if (starget->hostdata)
- kfree(starget->hostdata);
+ kfree(starget->hostdata);
starget->hostdata = NULL;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 711fcb5cec87..0707fa2c701b 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -990,11 +990,10 @@ mptsas_queue_device_delete(MPT_ADAPTER *ioc,
MpiEventDataSasDeviceStatusChange_t *sas_event_data)
{
struct fw_event_work *fw_event;
- int sz;
- sz = offsetof(struct fw_event_work, event_data) +
- sizeof(MpiEventDataSasDeviceStatusChange_t);
- fw_event = kzalloc(sz, GFP_ATOMIC);
+ fw_event = kzalloc(sizeof(*fw_event) +
+ sizeof(MpiEventDataSasDeviceStatusChange_t),
+ GFP_ATOMIC);
if (!fw_event) {
printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
ioc->name, __func__, __LINE__);
@@ -1011,10 +1010,8 @@ static void
mptsas_queue_rescan(MPT_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- int sz;
- sz = offsetof(struct fw_event_work, event_data);
- fw_event = kzalloc(sz, GFP_ATOMIC);
+ fw_event = kzalloc(sizeof(*fw_event), GFP_ATOMIC);
if (!fw_event) {
printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
ioc->name, __func__, __LINE__);
@@ -1206,27 +1203,28 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
"(mf = %p, mr = %p)\n", ioc->name, mf, mr));
pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
- if (pScsiTmReply) {
- dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
- "\ttask_type = 0x%02X, iocstatus = 0x%04X "
- "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
- "term_cmnds = %d\n", ioc->name,
- pScsiTmReply->Bus, pScsiTmReply->TargetID,
- pScsiTmReply->TaskType,
- le16_to_cpu(pScsiTmReply->IOCStatus),
- le32_to_cpu(pScsiTmReply->IOCLogInfo),
- pScsiTmReply->ResponseCode,
- le32_to_cpu(pScsiTmReply->TerminationCount)));
-
- if (pScsiTmReply->ResponseCode)
- mptscsih_taskmgmt_response_code(ioc,
- pScsiTmReply->ResponseCode);
- }
-
- if (pScsiTmReply && (pScsiTmReply->TaskType ==
+ if (!pScsiTmReply)
+ return 0;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
+ "\ttask_type = 0x%02X, iocstatus = 0x%04X "
+ "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
+ "term_cmnds = %d\n", ioc->name,
+ pScsiTmReply->Bus, pScsiTmReply->TargetID,
+ pScsiTmReply->TaskType,
+ le16_to_cpu(pScsiTmReply->IOCStatus),
+ le32_to_cpu(pScsiTmReply->IOCLogInfo),
+ pScsiTmReply->ResponseCode,
+ le32_to_cpu(pScsiTmReply->TerminationCount)));
+
+ if (pScsiTmReply->ResponseCode)
+ mptscsih_taskmgmt_response_code(ioc,
+ pScsiTmReply->ResponseCode);
+
+ if (pScsiTmReply->TaskType ==
MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
- MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
+ MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET) {
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
memcpy(ioc->taskmgmt_cmds.reply, mr,
@@ -1575,7 +1573,7 @@ mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
mptsas_port_delete(ioc, phy_info->port_details);
}
-struct mptsas_phyinfo *
+static struct mptsas_phyinfo *
mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
struct mptsas_devinfo *sas_device)
{
@@ -3648,7 +3646,7 @@ mptsas_send_expander_event(struct fw_event_work *fw_event)
* @handle:
*
*/
-struct mptsas_portinfo *
+static struct mptsas_portinfo *
mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
{
struct mptsas_portinfo buffer, *port_info;
@@ -3763,7 +3761,7 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
printk(MYIOC_s_DEBUG_FMT
"SDEV OUTSTANDING CMDS"
"%d\n", ioc->name,
- sdev->device_busy));
+ atomic_read(&sdev->device_busy)));
}
}
@@ -3856,10 +3854,8 @@ retry_page:
phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
sas_info->sas_address);
- if (phy_info) {
- mptsas_del_end_device(ioc, phy_info);
- goto redo_device_scan;
- }
+ mptsas_del_end_device(ioc, phy_info);
+ goto redo_device_scan;
} else
mptsas_volume_delete(ioc, sas_info->fw.id);
}
@@ -3870,9 +3866,8 @@ retry_page:
redo_expander_scan:
list_for_each_entry(port_info, &ioc->sas_topology, list) {
- if (port_info->phy_info &&
- (!(port_info->phy_info[0].identify.device_info &
- MPI_SAS_DEVICE_INFO_SMP_TARGET)))
+ if (!(port_info->phy_info[0].identify.device_info &
+ MPI_SAS_DEVICE_INFO_SMP_TARGET))
continue;
found_expander = 0;
handle = 0xFFFF;
@@ -4983,7 +4978,7 @@ static int
mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
{
u32 event = le32_to_cpu(reply->Event);
- int sz, event_data_sz;
+ int event_data_sz;
struct fw_event_work *fw_event;
unsigned long delay;
@@ -5093,8 +5088,7 @@ mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
event_data_sz = ((reply->MsgLength * 4) -
offsetof(EventNotificationReply_t, Data));
- sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
- fw_event = kzalloc(sz, GFP_ATOMIC);
+ fw_event = kzalloc(sizeof(*fw_event) + event_data_sz, GFP_ATOMIC);
if (!fw_event) {
printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
__func__, __LINE__);
@@ -5321,7 +5315,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return error;
}
-void
+static void
mptsas_shutdown(struct pci_dev *pdev)
{
MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
index 57e86ab77661..c396483d3624 100644
--- a/drivers/message/fusion/mptsas.h
+++ b/drivers/message/fusion/mptsas.h
@@ -110,7 +110,7 @@ struct fw_event_work {
MPT_ADAPTER *ioc;
u32 event;
u8 retries;
- u8 __attribute__((aligned(4))) event_data[1];
+ char event_data[0] __aligned(4);
};
struct mptsas_discovery_event {
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 2a1c6f21af27..e7dcb2583369 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -95,7 +95,7 @@ static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
- int lun, int ctx2abort, ulong timeout);
+ u64 lun, int ctx2abort, ulong timeout);
int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
@@ -536,7 +536,7 @@ mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pSc
}
scsi_print_command(sc);
- printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n",
+ printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %llu\n",
ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
"resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
@@ -692,7 +692,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
*/
if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
pScsiReply->ResponseInfo) {
- printk(MYIOC_s_NOTE_FMT "[%d:%d:%d:%d] "
+ printk(MYIOC_s_NOTE_FMT "[%d:%d:%d:%llu] "
"FCP_ResponseInfo=%08xh\n", ioc->name,
sc->device->host->host_no, sc->device->channel,
sc->device->id, sc->device->lun,
@@ -1155,7 +1155,7 @@ mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSI
return;
ioc = hd->ioc;
if (time - hd->last_queue_full > 10 * HZ) {
- dprintk(ioc, printk(MYIOC_s_WARN_FMT "Device (%d:%d:%d) reported QUEUE_FULL!\n",
+ dprintk(ioc, printk(MYIOC_s_WARN_FMT "Device (%d:%d:%llu) reported QUEUE_FULL!\n",
ioc->name, 0, sc->device->id, sc->device->lun));
hd->last_queue_full = time;
}
@@ -1271,15 +1271,13 @@ mptscsih_info(struct Scsi_Host *SChost)
h = shost_priv(SChost);
- if (h) {
- if (h->info_kbuf == NULL)
- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
- return h->info_kbuf;
- h->info_kbuf[0] = '\0';
+ if (h->info_kbuf == NULL)
+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
+ return h->info_kbuf;
+ h->info_kbuf[0] = '\0';
- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
- h->info_kbuf[size-1] = '\0';
- }
+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
+ h->info_kbuf[size-1] = '\0';
return h->info_kbuf;
}
@@ -1368,8 +1366,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt)
/* Default to untagged. Once a target structure has been allocated,
* use the Inquiry data to determine if device supports tagged.
*/
- if (vdevice
- && (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
+ if ((vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
&& (SCpnt->device->tagged_supported)) {
scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
if (SCpnt->request && SCpnt->request->ioprio) {
@@ -1518,7 +1515,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
*
**/
int
-mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
+mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, u64 lun,
int ctx2abort, ulong timeout)
{
MPT_FRAME_HDR *mf;
@@ -2380,7 +2377,7 @@ mptscsih_slave_configure(struct scsi_device *sdev)
vdevice = sdev->hostdata;
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "device @ %p, channel=%d, id=%d, lun=%d\n",
+ "device @ %p, channel=%d, id=%d, lun=%llu\n",
ioc->name, sdev, sdev->channel, sdev->id, sdev->lun));
if (ioc->bus_type == SPI)
dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
@@ -2971,7 +2968,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
+ (my_idx * MPT_SENSE_BUFFER_ALLOC));
devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
- "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
+ "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%llu\n",
ioc->name, __func__, cmd, io->channel, io->id, io->lun));
if (dir == MPI_SCSIIO_CONTROL_READ)
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 99e3390807f3..e1b1a198a62a 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -98,7 +98,7 @@ typedef struct _internal_cmd {
u8 cmd; /* SCSI Op Code */
u8 channel; /* bus number */
u8 id; /* SCSI ID (virtual) */
- int lun;
+ u64 lun;
u8 flags; /* Bit Field - See above */
u8 physDiskNum; /* Phys disk number, -1 else */
u8 rsvd2;
@@ -115,7 +115,7 @@ extern int mptscsih_show_info(struct seq_file *, struct Scsi_Host *);
extern const char * mptscsih_info(struct Scsi_Host *SChost);
extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt);
extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
- u8 id, int lun, int ctx2abort, ulong timeout);
+ u8 id, u64 lun, int ctx2abort, ulong timeout);
extern void mptscsih_slave_destroy(struct scsi_device *device);
extern int mptscsih_slave_configure(struct scsi_device *device);
extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 49d11338294b..787933d43d32 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -461,8 +461,7 @@ static int mptspi_target_alloc(struct scsi_target *starget)
static void
mptspi_target_destroy(struct scsi_target *starget)
{
- if (starget->hostdata)
- kfree(starget->hostdata);
+ kfree(starget->hostdata);
starget->hostdata = NULL;
}
@@ -620,7 +619,7 @@ static void mptspi_read_parameters(struct scsi_target *starget)
spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
}
-int
+static int
mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
{
MPT_ADAPTER *ioc = hd->ioc;
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 1d31d7284cbd..8152e9fa9d95 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -78,7 +78,7 @@ static unsigned int i2o_scsi_max_lun = 255;
struct i2o_scsi_host {
struct Scsi_Host *scsi_host; /* pointer to the SCSI host */
struct i2o_controller *iop; /* pointer to the I2O controller */
- unsigned int lun; /* lun's used for block devices */
+ u64 lun; /* lun's used for block devices */
struct i2o_device *channel[0]; /* channel->i2o_dev mapping table */
};
@@ -287,9 +287,8 @@ static int i2o_scsi_probe(struct device *dev)
}
if (le64_to_cpu(lun) >= scsi_host->max_lun) {
- osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)",
- (long unsigned int)le64_to_cpu(lun),
- scsi_host->max_lun);
+ osm_warn("SCSI device lun (%llu) >= max_lun of I2O host (%llu)",
+ le64_to_cpu(lun), scsi_host->max_lun);
return -EFAULT;
}
@@ -308,9 +307,9 @@ static int i2o_scsi_probe(struct device *dev)
if (rc)
goto err;
- osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
+ osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %llu\n",
i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
- (long unsigned int)le64_to_cpu(lun));
+ le64_to_cpu(lun));
return 0;
diff --git a/drivers/mfd/88pm805.c b/drivers/mfd/88pm805.c
index 64751c2a1ace..e9d50644660c 100644
--- a/drivers/mfd/88pm805.c
+++ b/drivers/mfd/88pm805.c
@@ -158,7 +158,7 @@ static int device_irq_init_805(struct pm80x_chip *chip)
* PM805_INT_STATUS is under 32K clock domain, so need to
* add proper delay before the next I2C register access.
*/
- msleep(1);
+ usleep_range(1000, 3000);
if (ret < 0)
goto out;
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
index bcfc9e85b4a0..3a2604580164 100644
--- a/drivers/mfd/88pm860x-core.c
+++ b/drivers/mfd/88pm860x-core.c
@@ -2,7 +2,8 @@
* Base driver for Marvell 88PM8607
*
* Copyright (C) 2009 Marvell International Ltd.
- * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -140,7 +141,8 @@ static struct resource codec_resources[] = {
/* Headset insertion or removal */
{PM8607_IRQ_HEADSET, PM8607_IRQ_HEADSET, "headset", IORESOURCE_IRQ,},
/* Audio short */
- {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,},
+ {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short",
+ IORESOURCE_IRQ,},
};
static struct resource battery_resources[] = {
@@ -150,10 +152,14 @@ static struct resource battery_resources[] = {
static struct resource charger_resources[] = {
{PM8607_IRQ_CHG, PM8607_IRQ_CHG, "charger detect", IORESOURCE_IRQ,},
- {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", IORESOURCE_IRQ,},
- {PM8607_IRQ_CHG_FAIL, PM8607_IRQ_CHG_FAIL, "charging timeout", IORESOURCE_IRQ,},
- {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging fault", IORESOURCE_IRQ,},
- {PM8607_IRQ_GPADC1, PM8607_IRQ_GPADC1, "battery temperature", IORESOURCE_IRQ,},
+ {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done",
+ IORESOURCE_IRQ,},
+ {PM8607_IRQ_CHG_FAIL, PM8607_IRQ_CHG_FAIL, "charging timeout",
+ IORESOURCE_IRQ,},
+ {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging fault",
+ IORESOURCE_IRQ,},
+ {PM8607_IRQ_GPADC1, PM8607_IRQ_GPADC1, "battery temperature",
+ IORESOURCE_IRQ,},
{PM8607_IRQ_VBAT, PM8607_IRQ_VBAT, "battery voltage", IORESOURCE_IRQ,},
{PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,},
};
@@ -568,8 +574,8 @@ static struct irq_domain_ops pm860x_irq_domain_ops = {
static int device_irq_init(struct pm860x_chip *chip,
struct pm860x_platform_data *pdata)
{
- struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
- : chip->companion;
+ struct i2c_client *i2c = (chip->id == CHIP_PM8607) ?
+ chip->client : chip->companion;
unsigned char status_buf[INT_STATUS_NUM];
unsigned long flags = IRQF_TRIGGER_FALLING | IRQF_ONESHOT;
int data, mask, ret = -EINVAL;
@@ -631,8 +637,8 @@ static int device_irq_init(struct pm860x_chip *chip,
if (!chip->core_irq)
goto out;
- ret = request_threaded_irq(chip->core_irq, NULL, pm860x_irq, flags | IRQF_ONESHOT,
- "88pm860x", chip);
+ ret = request_threaded_irq(chip->core_irq, NULL, pm860x_irq,
+ flags | IRQF_ONESHOT, "88pm860x", chip);
if (ret) {
dev_err(chip->dev, "Failed to request IRQ: %d\n", ret);
chip->core_irq = 0;
@@ -871,7 +877,7 @@ static void device_rtc_init(struct pm860x_chip *chip,
{
int ret;
- if ((pdata == NULL))
+ if (!pdata)
return;
rtc_devs[0].platform_data = pdata->rtc;
@@ -997,8 +1003,9 @@ static void device_8607_init(struct pm860x_chip *chip,
ret);
break;
default:
- dev_err(chip->dev, "Failed to detect Marvell 88PM8607. "
- "Chip ID: %02x\n", ret);
+ dev_err(chip->dev,
+ "Failed to detect Marvell 88PM8607. Chip ID: %02x\n",
+ ret);
goto out;
}
@@ -1120,8 +1127,8 @@ static int pm860x_dt_init(struct device_node *np,
ret = of_property_read_u32(np, "marvell,88pm860x-slave-addr",
&pdata->companion_addr);
if (ret) {
- dev_err(dev, "Not found \"marvell,88pm860x-slave-addr\" "
- "property\n");
+ dev_err(dev,
+ "Not found \"marvell,88pm860x-slave-addr\" property\n");
pdata->companion_addr = 0;
}
return 0;
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index ff8f803ce833..a93b4d0134a2 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -2,7 +2,8 @@
* I2C driver for Marvell 88PM860x
*
* Copyright (C) 2009 Marvell International Ltd.
- * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index fb824f501197..de5abf244746 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -13,7 +13,7 @@ config MFD_CORE
config MFD_CS5535
tristate "AMD CS5535 and CS5536 southbridge core functions"
select MFD_CORE
- depends on PCI && X86
+ depends on PCI && (X86_32 || (X86 && COMPILE_TEST))
---help---
This is the core driver for CS5535/CS5536 MFD functions. This is
necessary for using the board's GPIO and MFGPT functionality.
@@ -187,6 +187,7 @@ config MFD_MC13XXX
tristate
depends on (SPI_MASTER || I2C)
select MFD_CORE
+ select REGMAP_IRQ
help
Enable support for the Freescale MC13783 and MC13892 PMICs.
This driver provides common support for accessing the device,
@@ -253,6 +254,18 @@ config LPC_SCH
LPC bridge function of the Intel SCH provides support for
System Management Bus and General Purpose I/O.
+config INTEL_SOC_PMIC
+ bool "Support for Intel Atom SoC PMIC"
+ depends on I2C=y
+ select MFD_CORE
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ help
+ Select this option to enable support for the PMIC device
+ on some Intel SoC systems. The PMIC provides ADC, GPIO,
+ thermal, charger and related power management functions
+ on these systems.
+
config MFD_INTEL_MSIC
bool "Intel MSIC"
depends on INTEL_SCU_IPC
@@ -367,14 +380,15 @@ config MFD_MAX14577
of the device.
config MFD_MAX77686
- bool "Maxim Semiconductor MAX77686 PMIC Support"
+ bool "Maxim Semiconductor MAX77686/802 PMIC Support"
depends on I2C=y
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
select IRQ_DOMAIN
help
- Say yes here to add support for Maxim Semiconductor MAX77686.
- This is a Power Management IC with RTC on chip.
+ Say yes here to add support for Maxim Semiconductor MAX77686 and
+ MAX77802, which are Power Management ICs with an RTC on chip.
This driver provides common support for accessing the device;
additional drivers must be enabled in order to use the functionality
of the device.
@@ -574,6 +588,7 @@ config MFD_SEC_CORE
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
+ select REGULATOR
help
Support for the Samsung Electronics MFD series.
This driver provides common support for accessing the device,
@@ -1057,7 +1072,7 @@ config MFD_LM3533
config MFD_TIMBERDALE
tristate "Timberdale FPGA"
select MFD_CORE
- depends on PCI && GPIOLIB
+ depends on PCI && GPIOLIB && (X86_32 || COMPILE_TEST)
---help---
This is the core driver for the timberdale FPGA. This device is a
multifunction device which exposes numerous platform devices.
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 8c6e7bba4660..f00148782d9b 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -115,7 +115,7 @@ da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o
obj-$(CONFIG_MFD_DA9063) += da9063.o
obj-$(CONFIG_MFD_MAX14577) += max14577.o
-obj-$(CONFIG_MFD_MAX77686) += max77686.o max77686-irq.o
+obj-$(CONFIG_MFD_MAX77686) += max77686.o
obj-$(CONFIG_MFD_MAX77693) += max77693.o
obj-$(CONFIG_MFD_MAX8907) += max8907.o
max8925-objs := max8925-core.o max8925-i2c.o
@@ -169,3 +169,6 @@ obj-$(CONFIG_MFD_AS3711) += as3711.o
obj-$(CONFIG_MFD_AS3722) += as3722.o
obj-$(CONFIG_MFD_STW481X) += stw481x.o
obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o
+
+intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
+obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 14d9542a4eed..4e6e03d63e12 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -303,7 +303,10 @@ static ssize_t aat2870_reg_write_file(struct file *file,
while (*start == ' ')
start++;
- addr = simple_strtoul(start, &start, 16);
+ ret = kstrtoul(start, 16, &addr);
+ if (ret)
+ return ret;
+
if (addr >= AAT2870_REG_NUM) {
dev_err(aat2870->dev, "Invalid address, 0x%lx\n", addr);
return -EINVAL;
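
A minimal sketch of the kstrtoul() pattern adopted in the hunk above (illustrative function name, same base-16 parse); unlike simple_strtoul(), it rejects trailing garbage and reports overflow:

    #include <linux/kernel.h>

    /* Parse a hex register address, propagating -EINVAL/-ERANGE instead of
     * silently truncating the way simple_strtoul() would. */
    static int example_parse_hex(const char *buf, unsigned long *addr)
    {
            int ret;

            ret = kstrtoul(buf, 16, addr);
            if (ret)
                    return ret;
            return 0;
    }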
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index b348ae520629..4659ac1db039 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -91,8 +91,8 @@ static int ab3100_set_register_interruptible(struct ab3100 *ab3100,
err);
} else if (err != 2) {
dev_err(ab3100->dev,
- "write error (write register) "
- "%d bytes transferred (expected 2)\n",
+ "write error (write register)\n"
+ " %d bytes transferred (expected 2)\n",
err);
err = -EIO;
} else {
@@ -135,8 +135,8 @@ static int ab3100_set_test_register_interruptible(struct ab3100 *ab3100,
err);
} else if (err != 2) {
dev_err(ab3100->dev,
- "write error (write test register) "
- "%d bytes transferred (expected 2)\n",
+ "write error (write test register)\n"
+ " %d bytes transferred (expected 2)\n",
err);
err = -EIO;
} else {
@@ -171,8 +171,8 @@ static int ab3100_get_register_interruptible(struct ab3100 *ab3100,
goto get_reg_out_unlock;
} else if (err != 1) {
dev_err(ab3100->dev,
- "write error (send register address) "
- "%d bytes transferred (expected 1)\n",
+ "write error (send register address)\n"
+ " %d bytes transferred (expected 1)\n",
err);
err = -EIO;
goto get_reg_out_unlock;
@@ -189,8 +189,8 @@ static int ab3100_get_register_interruptible(struct ab3100 *ab3100,
goto get_reg_out_unlock;
} else if (err != 1) {
dev_err(ab3100->dev,
- "write error (read register) "
- "%d bytes transferred (expected 1)\n",
+ "write error (read register)\n"
+ " %d bytes transferred (expected 1)\n",
err);
err = -EIO;
goto get_reg_out_unlock;
@@ -237,8 +237,8 @@ static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
goto get_reg_page_out_unlock;
} else if (err != 1) {
dev_err(ab3100->dev,
- "write error (send first register address) "
- "%d bytes transferred (expected 1)\n",
+ "write error (send first register address)\n"
+ " %d bytes transferred (expected 1)\n",
err);
err = -EIO;
goto get_reg_page_out_unlock;
@@ -252,8 +252,8 @@ static int ab3100_get_register_page_interruptible(struct ab3100 *ab3100,
goto get_reg_page_out_unlock;
} else if (err != numregs) {
dev_err(ab3100->dev,
- "write error (read register page) "
- "%d bytes transferred (expected %d)\n",
+ "write error (read register page)\n"
+ " %d bytes transferred (expected %d)\n",
err, numregs);
err = -EIO;
goto get_reg_page_out_unlock;
@@ -295,8 +295,8 @@ static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
goto get_maskset_unlock;
} else if (err != 1) {
dev_err(ab3100->dev,
- "write error (maskset send address) "
- "%d bytes transferred (expected 1)\n",
+ "write error (maskset send address)\n"
+ " %d bytes transferred (expected 1)\n",
err);
err = -EIO;
goto get_maskset_unlock;
@@ -310,8 +310,8 @@ static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
goto get_maskset_unlock;
} else if (err != 1) {
dev_err(ab3100->dev,
- "write error (maskset read register) "
- "%d bytes transferred (expected 1)\n",
+ "write error (maskset read register)\n"
+ " %d bytes transferred (expected 1)\n",
err);
err = -EIO;
goto get_maskset_unlock;
@@ -330,8 +330,8 @@ static int ab3100_mask_and_set_register_interruptible(struct ab3100 *ab3100,
goto get_maskset_unlock;
} else if (err != 2) {
dev_err(ab3100->dev,
- "write error (write register) "
- "%d bytes transferred (expected 2)\n",
+ "write error (write register)\n"
+ " %d bytes transferred (expected 2)\n",
err);
err = -EIO;
goto get_maskset_unlock;
@@ -371,7 +371,7 @@ EXPORT_SYMBOL(ab3100_event_register);
int ab3100_event_unregister(struct ab3100 *ab3100,
struct notifier_block *nb)
{
- return blocking_notifier_chain_unregister(&ab3100->event_subscribers,
+ return blocking_notifier_chain_unregister(&ab3100->event_subscribers,
nb);
}
EXPORT_SYMBOL(ab3100_event_unregister);
@@ -455,7 +455,7 @@ static int ab3100_registers_print(struct seq_file *s, void *p)
u8 value;
u8 reg;
- seq_printf(s, "AB3100 registers:\n");
+ seq_puts(s, "AB3100 registers:\n");
for (reg = 0; reg < 0xff; reg++) {
ab3100_get_register_interruptible(ab3100, reg, &value);
@@ -560,8 +560,8 @@ static ssize_t ab3100_get_set_reg(struct file *file,
ab3100_get_register_interruptible(ab3100, user_reg, &regvalue);
dev_info(ab3100->dev,
- "debug write reg[0x%02x] with 0x%02x, "
- "after readback: 0x%02x\n",
+ "debug write reg[0x%02x]\n"
+ " with 0x%02x, after readback: 0x%02x\n",
user_reg, user_value, regvalue);
}
return buf_size;
@@ -719,8 +719,7 @@ static int ab3100_setup(struct ab3100 *ab3100)
*/
if (ab3100->chip_id == 0xc4) {
dev_warn(ab3100->dev,
- "AB3100 P1E variant detected, "
- "forcing chip to 32KHz\n");
+ "AB3100 P1E variant detected forcing chip to 32KHz\n");
err = ab3100_set_test_register_interruptible(ab3100,
0x02, 0x08);
}
@@ -878,8 +877,7 @@ static int ab3100_probe(struct i2c_client *client,
&ab3100->chip_id);
if (err) {
dev_err(&client->dev,
- "could not communicate with the AB3100 analog "
- "baseband chip\n");
+ "failed to communicate with AB3100 chip\n");
goto exit_no_detect;
}
@@ -902,8 +900,8 @@ static int ab3100_probe(struct i2c_client *client,
if (ids[i].id == 0x0) {
dev_err(&client->dev, "unknown analog baseband chip id: 0x%x\n",
ab3100->chip_id);
- dev_err(&client->dev, "accepting it anyway. Please update "
- "the driver.\n");
+ dev_err(&client->dev,
+ "accepting it anyway. Please update the driver.\n");
goto exit_no_detect;
}
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index cf2e6a198c6b..ce48aa72bb42 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -148,8 +148,8 @@ static const int ab9540_irq_regoffset[AB9540_NUM_IRQ_REGS] = {
/* AB8540 support */
static const int ab8540_irq_regoffset[AB8540_NUM_IRQ_REGS] = {
- 0, 1, 2, 3, 4, -1, -1, -1, -1, 11, 18, 19, 20, 21, 12, 13, 24, 5, 22, 23,
- 25, 26, 27, 28, 29, 30, 31,
+ 0, 1, 2, 3, 4, -1, -1, -1, -1, 11, 18, 19, 20, 21, 12, 13, 24, 5, 22,
+ 23, 25, 26, 27, 28, 29, 30, 31,
};
static const char ab8500_version_str[][7] = {
@@ -322,7 +322,7 @@ static int ab8500_mask_and_set_register(struct device *dev,
struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
atomic_inc(&ab8500->transfer_ongoing);
- ret= mask_and_set_register_interruptible(ab8500, bank, reg,
+ ret = mask_and_set_register_interruptible(ab8500, bank, reg,
bitmask, bitvalues);
atomic_dec(&ab8500->transfer_ongoing);
return ret;
@@ -415,9 +415,11 @@ static void ab8500_irq_unmask(struct irq_data *data)
if (type & IRQ_TYPE_EDGE_FALLING) {
if (offset >= AB8500_INT_GPIO6R && offset <= AB8500_INT_GPIO41R)
ab8500->mask[index + 2] &= ~mask;
- else if (offset >= AB9540_INT_GPIO50R && offset <= AB9540_INT_GPIO54R)
+ else if (offset >= AB9540_INT_GPIO50R &&
+ offset <= AB9540_INT_GPIO54R)
ab8500->mask[index + 1] &= ~mask;
- else if (offset == AB8540_INT_GPIO43R || offset == AB8540_INT_GPIO44R)
+ else if (offset == AB8540_INT_GPIO43R ||
+ offset == AB8540_INT_GPIO44R)
/* Here the falling IRQ is one bit lower */
ab8500->mask[index] &= ~(mask << 1);
else
@@ -451,7 +453,7 @@ static void update_latch_offset(u8 *offset, int i)
/* Fix inconsistent ab8540 bit mapping... */
if (unlikely(*offset == 16))
*offset = 25;
- if ((i==3) && (*offset >= 24))
+ if ((i == 3) && (*offset >= 24))
*offset += 2;
}
@@ -573,8 +575,8 @@ static int ab8500_irq_map(struct irq_domain *d, unsigned int virq,
}
static struct irq_domain_ops ab8500_irq_ops = {
- .map = ab8500_irq_map,
- .xlate = irq_domain_xlate_twocell,
+ .map = ab8500_irq_map,
+ .xlate = irq_domain_xlate_twocell,
};
static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
@@ -607,8 +609,8 @@ int ab8500_suspend(struct ab8500 *ab8500)
{
if (atomic_read(&ab8500->transfer_ongoing))
return -EINVAL;
- else
- return 0;
+
+ return 0;
}
static struct resource ab8500_gpadc_resources[] = {
@@ -1551,7 +1553,7 @@ static struct attribute_group ab9540_attr_group = {
static int ab8500_probe(struct platform_device *pdev)
{
- static char *switch_off_status[] = {
+ static const char *switch_off_status[] = {
"Swoff bit programming",
"Thermal protection activation",
"Vbat lower then BattOk falling threshold",
@@ -1560,7 +1562,7 @@ static int ab8500_probe(struct platform_device *pdev)
"Battery level lower than power on reset threshold",
"Power on key 1 pressed longer than 10 seconds",
"DB8500 thermal shutdown"};
- static char *turn_on_status[] = {
+ static const char *turn_on_status[] = {
"Battery rising (Vbat)",
"Power On Key 1 dbF",
"Power On Key 2 dbF",
@@ -1579,7 +1581,7 @@ static int ab8500_probe(struct platform_device *pdev)
int i;
u8 value;
- ab8500 = devm_kzalloc(&pdev->dev, sizeof *ab8500, GFP_KERNEL);
+ ab8500 = devm_kzalloc(&pdev->dev, sizeof(*ab8500), GFP_KERNEL);
if (!ab8500)
return -ENOMEM;
@@ -1636,7 +1638,7 @@ static int ab8500_probe(struct platform_device *pdev)
ab8500->mask_size = AB8540_NUM_IRQ_REGS;
ab8500->irq_reg_offset = ab8540_irq_regoffset;
ab8500->it_latchhier_num = AB8540_IT_LATCHHIER_NUM;
- }/* Configure AB8500 or AB9540 IRQ */
+ } /* Configure AB8500 or AB9540 IRQ */
else if (is_ab9540(ab8500) || is_ab8505(ab8500)) {
ab8500->mask_size = AB9540_NUM_IRQ_REGS;
ab8500->irq_reg_offset = ab9540_irq_regoffset;
@@ -1646,10 +1648,12 @@ static int ab8500_probe(struct platform_device *pdev)
ab8500->irq_reg_offset = ab8500_irq_regoffset;
ab8500->it_latchhier_num = AB8500_IT_LATCHHIER_NUM;
}
- ab8500->mask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
+ ab8500->mask = devm_kzalloc(&pdev->dev, ab8500->mask_size,
+ GFP_KERNEL);
if (!ab8500->mask)
return -ENOMEM;
- ab8500->oldmask = devm_kzalloc(&pdev->dev, ab8500->mask_size, GFP_KERNEL);
+ ab8500->oldmask = devm_kzalloc(&pdev->dev, ab8500->mask_size,
+ GFP_KERNEL);
if (!ab8500->oldmask)
return -ENOMEM;
@@ -1674,14 +1678,13 @@ static int ab8500_probe(struct platform_device *pdev)
if (value) {
for (i = 0; i < ARRAY_SIZE(switch_off_status); i++) {
if (value & 1)
- printk(KERN_CONT " \"%s\"",
- switch_off_status[i]);
+ pr_cont(" \"%s\"", switch_off_status[i]);
value = value >> 1;
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
} else {
- printk(KERN_CONT " None\n");
+ pr_cont(" None\n");
}
ret = get_register_interruptible(ab8500, AB8500_SYS_CTRL1_BLOCK,
AB8500_TURN_ON_STATUS, &value);
@@ -1692,12 +1695,12 @@ static int ab8500_probe(struct platform_device *pdev)
if (value) {
for (i = 0; i < ARRAY_SIZE(turn_on_status); i++) {
if (value & 1)
- printk("\"%s\" ", turn_on_status[i]);
+ pr_cont("\"%s\" ", turn_on_status[i]);
value = value >> 1;
}
- printk("\n");
+ pr_cont("\n");
} else {
- printk("None\n");
+ pr_cont("None\n");
}
if (plat && plat->init)
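
A small sketch of the printk(KERN_CONT)-to-pr_cont() conversion used above, assuming a caller-supplied name table much like switch_off_status:

    #include <linux/printk.h>

    /* Emit one logical line: an opening pr_info() followed by pr_cont()
     * continuations, terminated with a newline. */
    static void example_print_flags(unsigned long val, const char **names,
                                    int n)
    {
            int i;

            pr_info("active flags:");
            for (i = 0; i < n; i++, val >>= 1)
                    if (val & 1)
                            pr_cont(" \"%s\"", names[i]);
            pr_cont("\n");
    }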
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index d1a22aae2df5..b2c7e3b1edfa 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -135,10 +135,10 @@ struct ab8500_prcmu_ranges {
/* hwreg- "mask" and "shift" entries ressources */
struct hwreg_cfg {
u32 bank; /* target bank */
- u32 addr; /* target address */
+ unsigned long addr; /* target address */
uint fmt; /* format */
- uint mask; /* read/write mask, applied before any bit shift */
- int shift; /* bit shift (read:right shift, write:left shift */
+ unsigned long mask; /* read/write mask, applied before any bit shift */
+ long shift; /* bit shift (read: right shift, write: left shift) */
};
/* fmt bit #0: 0=hexa, 1=dec */
#define REG_FMT_DEC(c) ((c)->fmt & 0x1)
@@ -1304,16 +1304,17 @@ static int ab8500_registers_print(struct device *dev, u32 bank,
}
if (s) {
- err = seq_printf(s, " [0x%02X/0x%02X]: 0x%02X\n",
- bank, reg, value);
+ err = seq_printf(s,
+ " [0x%02X/0x%02X]: 0x%02X\n",
+ bank, reg, value);
if (err < 0) {
/* Error is not returned here since
* the output is wanted in any case */
return 0;
}
} else {
- printk(KERN_INFO" [0x%02X/0x%02X]: 0x%02X\n",
- bank, reg, value);
+ dev_info(dev, " [0x%02X/0x%02X]: 0x%02X\n",
+ bank, reg, value);
}
}
}
@@ -1325,7 +1326,7 @@ static int ab8500_print_bank_registers(struct seq_file *s, void *p)
struct device *dev = s->private;
u32 bank = debug_bank;
- seq_printf(s, AB8500_NAME_STRING " register values:\n");
+ seq_puts(s, AB8500_NAME_STRING " register values:\n");
seq_printf(s, " bank 0x%02X:\n", bank);
@@ -1350,12 +1351,11 @@ static int ab8500_print_all_banks(struct seq_file *s, void *p)
{
struct device *dev = s->private;
unsigned int i;
- int err;
- seq_printf(s, AB8500_NAME_STRING " register values:\n");
+ seq_puts(s, AB8500_NAME_STRING " register values:\n");
for (i = 0; i < AB8500_NUM_BANKS; i++) {
- err = seq_printf(s, " bank 0x%02X:\n", i);
+ seq_printf(s, " bank 0x%02X:\n", i);
ab8500_registers_print(dev, i, s);
}
@@ -1367,10 +1367,10 @@ void ab8500_dump_all_banks(struct device *dev)
{
unsigned int i;
- printk(KERN_INFO"ab8500 register values:\n");
+ dev_info(dev, "ab8500 register values:\n");
for (i = 1; i < AB8500_NUM_BANKS; i++) {
- printk(KERN_INFO" bank 0x%02X:\n", i);
+ dev_info(dev, " bank 0x%02X:\n", i);
ab8500_registers_print(dev, i, NULL);
}
}
@@ -1384,8 +1384,6 @@ static struct ab8500_register_dump
u8 value;
} ab8500_complete_register_dump[DUMP_MAX_REGS];
-extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
-
/* This shall only be called upon kernel panic! */
void ab8500_dump_all_banks_to_mem(void)
{
@@ -1393,8 +1391,7 @@ void ab8500_dump_all_banks_to_mem(void)
u8 bank;
int err = 0;
- pr_info("Saving all ABB registers at \"ab8500_complete_register_dump\" "
- "for crash analyze.\n");
+ pr_info("Saving all ABB registers for crash analysis.\n");
for (bank = 0; bank < AB8500_NUM_BANKS; bank++) {
for (i = 0; i < debug_ranges[bank].num_ranges; i++) {
@@ -1564,7 +1561,7 @@ static ssize_t ab8500_val_write(struct file *file,
err = abx500_set_register_interruptible(dev,
(u8)debug_bank, debug_address, (u8)user_val);
if (err < 0) {
- printk(KERN_ERR "abx500_set_reg failed %d, %d", err, __LINE__);
+ pr_err("abx500_set_reg failed %d, %d", err, __LINE__);
return -EINVAL;
}
@@ -1596,7 +1593,7 @@ static int ab8500_interrupts_print(struct seq_file *s, void *p)
{
int line;
- seq_printf(s, "name: number: number of: wake:\n");
+ seq_puts(s, "name: number: number of: wake:\n");
for (line = 0; line < num_interrupt_lines; line++) {
struct irq_desc *desc = irq_to_desc(line + irq_first);
@@ -1722,7 +1719,8 @@ static int ab8500_print_modem_registers(struct seq_file *s, void *p)
static int ab8500_modem_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_print_modem_registers, inode->i_private);
+ return single_open(file, ab8500_print_modem_registers,
+ inode->i_private);
}
static const struct file_operations ab8500_modem_fops = {
@@ -1751,7 +1749,8 @@ static int ab8500_gpadc_bat_ctrl_print(struct seq_file *s, void *p)
static int ab8500_gpadc_bat_ctrl_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_gpadc_bat_ctrl_print, inode->i_private);
+ return single_open(file, ab8500_gpadc_bat_ctrl_print,
+ inode->i_private);
}
static const struct file_operations ab8500_gpadc_bat_ctrl_fops = {
@@ -1781,7 +1780,8 @@ static int ab8500_gpadc_btemp_ball_print(struct seq_file *s, void *p)
static int ab8500_gpadc_btemp_ball_open(struct inode *inode,
struct file *file)
{
- return single_open(file, ab8500_gpadc_btemp_ball_print, inode->i_private);
+ return single_open(file, ab8500_gpadc_btemp_ball_print,
+ inode->i_private);
}
static const struct file_operations ab8500_gpadc_btemp_ball_fops = {
@@ -1962,7 +1962,8 @@ static int ab8500_gpadc_main_bat_v_print(struct seq_file *s, void *p)
static int ab8500_gpadc_main_bat_v_open(struct inode *inode,
struct file *file)
{
- return single_open(file, ab8500_gpadc_main_bat_v_print, inode->i_private);
+ return single_open(file, ab8500_gpadc_main_bat_v_print,
+ inode->i_private);
}
static const struct file_operations ab8500_gpadc_main_bat_v_fops = {
@@ -2082,7 +2083,8 @@ static int ab8500_gpadc_bk_bat_v_print(struct seq_file *s, void *p)
static int ab8500_gpadc_bk_bat_v_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_gpadc_bk_bat_v_print, inode->i_private);
+ return single_open(file, ab8500_gpadc_bk_bat_v_print,
+ inode->i_private);
}
static const struct file_operations ab8500_gpadc_bk_bat_v_fops = {
@@ -2111,7 +2113,8 @@ static int ab8500_gpadc_die_temp_print(struct seq_file *s, void *p)
static int ab8500_gpadc_die_temp_open(struct inode *inode, struct file *file)
{
- return single_open(file, ab8500_gpadc_die_temp_print, inode->i_private);
+ return single_open(file, ab8500_gpadc_die_temp_print,
+ inode->i_private);
}
static const struct file_operations ab8500_gpadc_die_temp_fops = {
@@ -2190,8 +2193,9 @@ static int ab8540_gpadc_vbat_true_meas_print(struct seq_file *s, void *p)
gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
vbat_true_meas_raw = ab8500_gpadc_read_raw(gpadc, VBAT_TRUE_MEAS,
avg_sample, trig_edge, trig_timer, conv_type);
- vbat_true_meas_convert = ab8500_gpadc_ad_to_voltage(gpadc, VBAT_TRUE_MEAS,
- vbat_true_meas_raw);
+ vbat_true_meas_convert =
+ ab8500_gpadc_ad_to_voltage(gpadc, VBAT_TRUE_MEAS,
+ vbat_true_meas_raw);
return seq_printf(s, "%d,0x%X\n",
vbat_true_meas_convert, vbat_true_meas_raw);
@@ -2285,7 +2289,8 @@ static const struct file_operations ab8540_gpadc_vbat_meas_and_ibat_fops = {
.owner = THIS_MODULE,
};
-static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s, void *p)
+static int ab8540_gpadc_vbat_true_meas_and_ibat_print(struct seq_file *s,
+ void *p)
{
int vbat_true_meas_raw;
int vbat_true_meas_convert;
@@ -2314,7 +2319,8 @@ static int ab8540_gpadc_vbat_true_meas_and_ibat_open(struct inode *inode,
inode->i_private);
}
-static const struct file_operations ab8540_gpadc_vbat_true_meas_and_ibat_fops = {
+static const struct file_operations
+ab8540_gpadc_vbat_true_meas_and_ibat_fops = {
.open = ab8540_gpadc_vbat_true_meas_and_ibat_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2368,14 +2374,15 @@ static int ab8540_gpadc_otp_cal_print(struct seq_file *s, void *p)
ab8540_gpadc_get_otp(gpadc, &vmain_l, &vmain_h, &btemp_l, &btemp_h,
&vbat_l, &vbat_h, &ibat_l, &ibat_h);
return seq_printf(s, "VMAIN_L:0x%X\n"
- "VMAIN_H:0x%X\n"
- "BTEMP_L:0x%X\n"
- "BTEMP_H:0x%X\n"
- "VBAT_L:0x%X\n"
- "VBAT_H:0x%X\n"
- "IBAT_L:0x%X\n"
- "IBAT_H:0x%X\n",
- vmain_l, vmain_h, btemp_l, btemp_h, vbat_l, vbat_h, ibat_l, ibat_h);
+ "VMAIN_H:0x%X\n"
+ "BTEMP_L:0x%X\n"
+ "BTEMP_H:0x%X\n"
+ "VBAT_L:0x%X\n"
+ "VBAT_H:0x%X\n"
+ "IBAT_L:0x%X\n"
+ "IBAT_H:0x%X\n",
+ vmain_l, vmain_h, btemp_l, btemp_h,
+ vbat_l, vbat_h, ibat_l, ibat_h);
}
static int ab8540_gpadc_otp_cal_open(struct inode *inode, struct file *file)
@@ -2419,8 +2426,8 @@ static ssize_t ab8500_gpadc_avg_sample_write(struct file *file,
|| (user_avg_sample == SAMPLE_16)) {
avg_sample = (u8) user_avg_sample;
} else {
- dev_err(dev, "debugfs error input: "
- "should be egal to 1, 4, 8 or 16\n");
+ dev_err(dev,
+ "debugfs err input: should be egal to 1, 4, 8 or 16\n");
return -EINVAL;
}
@@ -2504,14 +2511,14 @@ static ssize_t ab8500_gpadc_trig_timer_write(struct file *file,
if (err)
return err;
- if ((user_trig_timer >= 0) && (user_trig_timer <= 255)) {
- trig_timer = (u8) user_trig_timer;
- } else {
- dev_err(dev, "debugfs error input: "
- "should be beetween 0 to 255\n");
+ if (user_trig_timer & ~0xFF) {
+ dev_err(dev,
+ "debugfs error input: should be beetween 0 to 255\n");
return -EINVAL;
}
+ trig_timer = (u8) user_trig_timer;
+
return count;
}
@@ -2579,6 +2586,7 @@ static const struct file_operations ab8500_gpadc_conv_type_fops = {
static int strval_len(char *b)
{
char *s = b;
+
if ((*s == '0') && ((*(s+1) == 'x') || (*(s+1) == 'X'))) {
s += 2;
for (; *s && (*s != ' ') && (*s != '\n'); s++) {
@@ -2643,13 +2651,17 @@ static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg,
b += (*(b+2) == ' ') ? 3 : 6;
if (strval_len(b) == 0)
return -EINVAL;
- loc.mask = simple_strtoul(b, &b, 0);
+ ret = kstrtoul(b, 0, &loc.mask);
+ if (ret)
+ return ret;
} else if ((!strncmp(b, "-s ", 3)) ||
(!strncmp(b, "-shift ", 7))) {
b += (*(b+2) == ' ') ? 3 : 7;
if (strval_len(b) == 0)
return -EINVAL;
- loc.shift = simple_strtol(b, &b, 0);
+ ret = kstrtol(b, 0, &loc.shift);
+ if (ret)
+ return ret;
} else {
return -EINVAL;
}
@@ -2657,29 +2669,36 @@ static ssize_t hwreg_common_write(char *b, struct hwreg_cfg *cfg,
/* get arg BANK and ADDRESS */
if (strval_len(b) == 0)
return -EINVAL;
- loc.bank = simple_strtoul(b, &b, 0);
+ ret = kstrtouint(b, 0, &loc.bank);
+ if (ret)
+ return ret;
while (*b == ' ')
b++;
if (strval_len(b) == 0)
return -EINVAL;
- loc.addr = simple_strtoul(b, &b, 0);
+ ret = kstrtoul(b, 0, &loc.addr);
+ if (ret)
+ return ret;
if (write) {
while (*b == ' ')
b++;
if (strval_len(b) == 0)
return -EINVAL;
- val = simple_strtoul(b, &b, 0);
+ ret = kstrtouint(b, 0, &val);
+ if (ret)
+ return ret;
}
/* args are ok, update target cfg (mainly for read) */
*cfg = loc;
#ifdef ABB_HWREG_DEBUG
- pr_warn("HWREG request: %s, %s, addr=0x%08X, mask=0x%X, shift=%d"
- "value=0x%X\n", (write) ? "write" : "read",
- REG_FMT_DEC(cfg) ? "decimal" : "hexa",
- cfg->addr, cfg->mask, cfg->shift, val);
+ pr_warn("HWREG request: %s, %s,\n"
+ " addr=0x%08X, mask=0x%X, shift=%d" "value=0x%X\n",
+ (write) ? "write" : "read",
+ REG_FMT_DEC(cfg) ? "decimal" : "hexa",
+ cfg->addr, cfg->mask, cfg->shift, val);
#endif
if (!write)
@@ -2765,8 +2784,8 @@ static ssize_t show_irq(struct device *dev,
irq_index = name - irq_first;
if (irq_index >= num_irqs)
return -EINVAL;
- else
- return sprintf(buf, "%u\n", irq_count[irq_index]);
+
+ return sprintf(buf, "%u\n", irq_count[irq_index]);
}
static ssize_t ab8500_subscribe_write(struct file *file,
@@ -2815,7 +2834,7 @@ static ssize_t ab8500_subscribe_write(struct file *file,
dev_attr[irq_index]->attr.mode = S_IRUGO;
err = sysfs_create_file(&dev->kobj, &dev_attr[irq_index]->attr);
if (err < 0) {
- printk(KERN_ERR "sysfs_create_file failed %d\n", err);
+ pr_info("sysfs_create_file failed %d\n", err);
return err;
}
@@ -2823,8 +2842,8 @@ static ssize_t ab8500_subscribe_write(struct file *file,
IRQF_SHARED | IRQF_NO_SUSPEND,
"ab8500-debug", &dev->kobj);
if (err < 0) {
- printk(KERN_ERR "request_threaded_irq failed %d, %lu\n",
- err, user_val);
+ pr_info("request_threaded_irq failed %d, %lu\n",
+ err, user_val);
sysfs_remove_file(&dev->kobj, &dev_attr[irq_index]->attr);
return err;
}
@@ -2946,6 +2965,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
struct dentry *file;
struct ab8500 *ab8500;
struct resource *res;
+
debug_bank = AB8500_MISC;
debug_address = AB8500_REV_REG & 0x00FF;
@@ -2958,7 +2978,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
return -ENOMEM;
dev_attr = devm_kzalloc(&plf->dev,
- sizeof(*dev_attr)*num_irqs,GFP_KERNEL);
+ sizeof(*dev_attr)*num_irqs, GFP_KERNEL);
if (!dev_attr)
return -ENOMEM;
@@ -2969,23 +2989,20 @@ static int ab8500_debug_probe(struct platform_device *plf)
res = platform_get_resource_byname(plf, 0, "IRQ_AB8500");
if (!res) {
- dev_err(&plf->dev, "AB8500 irq not found, err %d\n",
- irq_first);
- return ENXIO;
+ dev_err(&plf->dev, "AB8500 irq not found, err %d\n", irq_first);
+ return -ENXIO;
}
irq_ab8500 = res->start;
irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
if (irq_first < 0) {
- dev_err(&plf->dev, "First irq not found, err %d\n",
- irq_first);
+ dev_err(&plf->dev, "First irq not found, err %d\n", irq_first);
return irq_first;
}
irq_last = platform_get_irq_byname(plf, "IRQ_LAST");
if (irq_last < 0) {
- dev_err(&plf->dev, "Last irq not found, err %d\n",
- irq_last);
+ dev_err(&plf->dev, "Last irq not found, err %d\n", irq_last);
return irq_last;
}
@@ -2994,37 +3011,41 @@ static int ab8500_debug_probe(struct platform_device *plf)
goto err;
ab8500_gpadc_dir = debugfs_create_dir(AB8500_ADC_NAME_STRING,
- ab8500_dir);
+ ab8500_dir);
if (!ab8500_gpadc_dir)
goto err;
- file = debugfs_create_file("all-bank-registers", S_IRUGO,
- ab8500_dir, &plf->dev, &ab8500_registers_fops);
+ file = debugfs_create_file("all-bank-registers", S_IRUGO, ab8500_dir,
+ &plf->dev, &ab8500_registers_fops);
if (!file)
goto err;
- file = debugfs_create_file("all-banks", S_IRUGO,
- ab8500_dir, &plf->dev, &ab8500_all_banks_fops);
+ file = debugfs_create_file("all-banks", S_IRUGO, ab8500_dir,
+ &plf->dev, &ab8500_all_banks_fops);
if (!file)
goto err;
- file = debugfs_create_file("register-bank", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_bank_fops);
+ file = debugfs_create_file("register-bank",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_bank_fops);
if (!file)
goto err;
- file = debugfs_create_file("register-address", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_address_fops);
+ file = debugfs_create_file("register-address",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_address_fops);
if (!file)
goto err;
- file = debugfs_create_file("register-value", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_val_fops);
+ file = debugfs_create_file("register-value",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_val_fops);
if (!file)
goto err;
- file = debugfs_create_file("irq-subscribe", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_subscribe_fops);
+ file = debugfs_create_file("irq-subscribe",
+ (S_IRUGO | S_IWUSR | S_IWGRP), ab8500_dir,
+ &plf->dev, &ab8500_subscribe_fops);
if (!file)
goto err;
@@ -3042,158 +3063,191 @@ static int ab8500_debug_probe(struct platform_device *plf)
num_interrupt_lines = AB8540_NR_IRQS;
}
- file = debugfs_create_file("interrupts", (S_IRUGO),
- ab8500_dir, &plf->dev, &ab8500_interrupts_fops);
+ file = debugfs_create_file("interrupts", (S_IRUGO), ab8500_dir,
+ &plf->dev, &ab8500_interrupts_fops);
if (!file)
goto err;
- file = debugfs_create_file("irq-unsubscribe", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_unsubscribe_fops);
+ file = debugfs_create_file("irq-unsubscribe",
+ (S_IRUGO | S_IWUSR | S_IWGRP), ab8500_dir,
+ &plf->dev, &ab8500_unsubscribe_fops);
if (!file)
goto err;
file = debugfs_create_file("hwreg", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
+ ab8500_dir, &plf->dev, &ab8500_hwreg_fops);
if (!file)
goto err;
- file = debugfs_create_file("all-modem-registers", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_dir, &plf->dev, &ab8500_modem_fops);
+ file = debugfs_create_file("all-modem-registers",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_dir, &plf->dev, &ab8500_modem_fops);
if (!file)
goto err;
file = debugfs_create_file("bat_ctrl", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bat_ctrl_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_bat_ctrl_fops);
if (!file)
goto err;
file = debugfs_create_file("btemp_ball", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_btemp_ball_fops);
+ ab8500_gpadc_dir,
+ &plf->dev, &ab8500_gpadc_btemp_ball_fops);
if (!file)
goto err;
- file = debugfs_create_file("main_charger_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_v_fops);
+ file = debugfs_create_file("main_charger_v",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_main_charger_v_fops);
if (!file)
goto err;
- file = debugfs_create_file("acc_detect1", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect1_fops);
+ file = debugfs_create_file("acc_detect1",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_acc_detect1_fops);
if (!file)
goto err;
- file = debugfs_create_file("acc_detect2", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_acc_detect2_fops);
+ file = debugfs_create_file("acc_detect2",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_acc_detect2_fops);
if (!file)
goto err;
file = debugfs_create_file("adc_aux1", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux1_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_aux1_fops);
if (!file)
goto err;
file = debugfs_create_file("adc_aux2", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_aux2_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_aux2_fops);
if (!file)
goto err;
file = debugfs_create_file("main_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_bat_v_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_main_bat_v_fops);
if (!file)
goto err;
file = debugfs_create_file("vbus_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_vbus_v_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_vbus_v_fops);
if (!file)
goto err;
- file = debugfs_create_file("main_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_main_charger_c_fops);
+ file = debugfs_create_file("main_charger_c",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_main_charger_c_fops);
if (!file)
goto err;
- file = debugfs_create_file("usb_charger_c", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
+ file = debugfs_create_file("usb_charger_c",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir,
+ &plf->dev, &ab8500_gpadc_usb_charger_c_fops);
if (!file)
goto err;
file = debugfs_create_file("bk_bat_v", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_bk_bat_v_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_bk_bat_v_fops);
if (!file)
goto err;
file = debugfs_create_file("die_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_die_temp_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_die_temp_fops);
if (!file)
goto err;
file = debugfs_create_file("usb_id", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_usb_id_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_usb_id_fops);
if (!file)
goto err;
if (is_ab8540(ab8500)) {
- file = debugfs_create_file("xtal_temp", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8540_gpadc_xtal_temp_fops);
+ file = debugfs_create_file("xtal_temp",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_xtal_temp_fops);
if (!file)
goto err;
- file = debugfs_create_file("vbattruemeas", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev,
- &ab8540_gpadc_vbat_true_meas_fops);
+ file = debugfs_create_file("vbattruemeas",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_vbat_true_meas_fops);
if (!file)
goto err;
file = debugfs_create_file("batctrl_and_ibat",
- (S_IRUGO | S_IWUGO), ab8500_gpadc_dir,
- &plf->dev, &ab8540_gpadc_bat_ctrl_and_ibat_fops);
+ (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir,
+ &plf->dev,
+ &ab8540_gpadc_bat_ctrl_and_ibat_fops);
if (!file)
goto err;
file = debugfs_create_file("vbatmeas_and_ibat",
- (S_IRUGO | S_IWUGO), ab8500_gpadc_dir,
- &plf->dev,
- &ab8540_gpadc_vbat_meas_and_ibat_fops);
+ (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8540_gpadc_vbat_meas_and_ibat_fops);
if (!file)
goto err;
file = debugfs_create_file("vbattruemeas_and_ibat",
- (S_IRUGO | S_IWUGO), ab8500_gpadc_dir,
- &plf->dev,
- &ab8540_gpadc_vbat_true_meas_and_ibat_fops);
+ (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir,
+ &plf->dev,
+ &ab8540_gpadc_vbat_true_meas_and_ibat_fops);
if (!file)
goto err;
file = debugfs_create_file("battemp_and_ibat",
- (S_IRUGO | S_IWUGO), ab8500_gpadc_dir,
+ (S_IRUGO | S_IWUGO),
+ ab8500_gpadc_dir,
&plf->dev, &ab8540_gpadc_bat_temp_and_ibat_fops);
if (!file)
goto err;
- file = debugfs_create_file("otp_calib", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8540_gpadc_otp_calib_fops);
+ file = debugfs_create_file("otp_calib",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ ab8500_gpadc_dir,
+ &plf->dev, &ab8540_gpadc_otp_calib_fops);
if (!file)
goto err;
}
file = debugfs_create_file("avg_sample", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_avg_sample_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_avg_sample_fops);
if (!file)
goto err;
file = debugfs_create_file("trig_edge", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_trig_edge_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_trig_edge_fops);
if (!file)
goto err;
file = debugfs_create_file("trig_timer", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_trig_timer_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_trig_timer_fops);
if (!file)
goto err;
file = debugfs_create_file("conv_type", (S_IRUGO | S_IWUSR | S_IWGRP),
- ab8500_gpadc_dir, &plf->dev, &ab8500_gpadc_conv_type_fops);
+ ab8500_gpadc_dir, &plf->dev,
+ &ab8500_gpadc_conv_type_fops);
if (!file)
goto err;
return 0;
err:
- if (ab8500_dir)
- debugfs_remove_recursive(ab8500_dir);
+ debugfs_remove_recursive(ab8500_dir);
dev_err(&plf->dev, "failed to create debugfs entries.\n");
return -ENOMEM;
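
For the widened hwreg_cfg fields above, a sketch of the mask/shift convention the structure comment describes (read applies the mask before a right shift, write shifts left first); the helper names are illustrative:

    static unsigned long hwreg_decode(unsigned long raw, unsigned long mask,
                                      long shift)
    {
            return (raw & mask) >> shift;   /* read: mask, then right shift */
    }

    static unsigned long hwreg_encode(unsigned long val, unsigned long mask,
                                      long shift)
    {
            return (val << shift) & mask;   /* write: left shift, then mask */
    }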
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index cfc191abae4a..10a0cb90619a 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -123,6 +123,8 @@ static irqreturn_t arizona_underclocked(int irq, void *data)
dev_err(arizona->dev, "AIF2 underclocked\n");
if (val & ARIZONA_AIF1_UNDERCLOCKED_STS)
dev_err(arizona->dev, "AIF1 underclocked\n");
+ if (val & ARIZONA_ISRC3_UNDERCLOCKED_STS)
+ dev_err(arizona->dev, "ISRC3 underclocked\n");
if (val & ARIZONA_ISRC2_UNDERCLOCKED_STS)
dev_err(arizona->dev, "ISRC2 underclocked\n");
if (val & ARIZONA_ISRC1_UNDERCLOCKED_STS)
@@ -192,6 +194,8 @@ static irqreturn_t arizona_overclocked(int irq, void *data)
dev_err(arizona->dev, "ASRC sync WARP overclocked\n");
if (val[1] & ARIZONA_ADSP2_1_OVERCLOCKED_STS)
dev_err(arizona->dev, "DSP1 overclocked\n");
+ if (val[1] & ARIZONA_ISRC3_OVERCLOCKED_STS)
+ dev_err(arizona->dev, "ISRC3 overclocked\n");
if (val[1] & ARIZONA_ISRC2_OVERCLOCKED_STS)
dev_err(arizona->dev, "ISRC2 overclocked\n");
if (val[1] & ARIZONA_ISRC1_OVERCLOCKED_STS)
@@ -497,12 +501,12 @@ const struct dev_pm_ops arizona_pm_ops = {
EXPORT_SYMBOL_GPL(arizona_pm_ops);
#ifdef CONFIG_OF
-int arizona_of_get_type(struct device *dev)
+unsigned long arizona_of_get_type(struct device *dev)
{
const struct of_device_id *id = of_match_device(arizona_of_match, dev);
if (id)
- return (int)id->data;
+ return (unsigned long)id->data;
else
return 0;
}
@@ -578,17 +582,21 @@ static const struct mfd_cell early_devs[] = {
};
static const char *wm5102_supplies[] = {
+ "MICVDD",
"DBVDD2",
"DBVDD3",
"CPVDD",
"SPKVDDL",
"SPKVDDR",
- "MICVDD",
};
static const struct mfd_cell wm5102_devs[] = {
{ .name = "arizona-micsupp" },
- { .name = "arizona-extcon" },
+ {
+ .name = "arizona-extcon",
+ .parent_supplies = wm5102_supplies,
+ .num_parent_supplies = 1, /* We only need MICVDD */
+ },
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
@@ -601,7 +609,11 @@ static const struct mfd_cell wm5102_devs[] = {
static const struct mfd_cell wm5110_devs[] = {
{ .name = "arizona-micsupp" },
- { .name = "arizona-extcon" },
+ {
+ .name = "arizona-extcon",
+ .parent_supplies = wm5102_supplies,
+ .num_parent_supplies = 1, /* We only need MICVDD */
+ },
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
@@ -613,6 +625,7 @@ static const struct mfd_cell wm5110_devs[] = {
};
static const char *wm8997_supplies[] = {
+ "MICVDD",
"DBVDD2",
"CPVDD",
"SPKVDD",
@@ -620,7 +633,11 @@ static const char *wm8997_supplies[] = {
static const struct mfd_cell wm8997_devs[] = {
{ .name = "arizona-micsupp" },
- { .name = "arizona-extcon" },
+ {
+ .name = "arizona-extcon",
+ .parent_supplies = wm8997_supplies,
+ .num_parent_supplies = 1, /* We only need MICVDD */
+ },
{ .name = "arizona-gpio" },
{ .name = "arizona-haptics" },
{ .name = "arizona-pwm" },
@@ -683,7 +700,13 @@ int arizona_dev_init(struct arizona *arizona)
goto err_early;
}
- arizona->dcvdd = devm_regulator_get(arizona->dev, "DCVDD");
+ /*
+ * Don't use devres here because the only device we have to get
+ * against is the MFD device and DCVDD will likely be supplied by
+ * one of its children. Meaning that the regulator will be
+ * destroyed by the time devres calls regulator put.
+ */
+ arizona->dcvdd = regulator_get(arizona->dev, "DCVDD");
if (IS_ERR(arizona->dcvdd)) {
ret = PTR_ERR(arizona->dcvdd);
dev_err(dev, "Failed to request DCVDD: %d\n", ret);
@@ -697,7 +720,7 @@ int arizona_dev_init(struct arizona *arizona)
"arizona /RESET");
if (ret != 0) {
dev_err(dev, "Failed to request /RESET: %d\n", ret);
- goto err_early;
+ goto err_dcvdd;
}
}
@@ -706,7 +729,7 @@ int arizona_dev_init(struct arizona *arizona)
if (ret != 0) {
dev_err(dev, "Failed to enable core supplies: %d\n",
ret);
- goto err_early;
+ goto err_dcvdd;
}
ret = regulator_enable(arizona->dcvdd);
@@ -1015,6 +1038,8 @@ err_reset:
err_enable:
regulator_bulk_disable(arizona->num_core_supplies,
arizona->core_supplies);
+err_dcvdd:
+ regulator_put(arizona->dcvdd);
err_early:
mfd_remove_devices(dev);
return ret;
@@ -1023,16 +1048,20 @@ EXPORT_SYMBOL_GPL(arizona_dev_init);
int arizona_dev_exit(struct arizona *arizona)
{
+ pm_runtime_disable(arizona->dev);
+
+ regulator_disable(arizona->dcvdd);
+ regulator_put(arizona->dcvdd);
+
mfd_remove_devices(arizona->dev);
arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona);
arizona_free_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, arizona);
- pm_runtime_disable(arizona->dev);
arizona_irq_exit(arizona);
if (arizona->pdata.reset)
gpio_set_value_cansleep(arizona->pdata.reset, 0);
- regulator_disable(arizona->dcvdd);
- regulator_bulk_disable(ARRAY_SIZE(arizona->core_supplies),
+
+ regulator_bulk_disable(arizona->num_core_supplies,
arizona->core_supplies);
return 0;
}
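
A minimal sketch of the manual regulator_get()/regulator_put() pairing the DCVDD comment above argues for; the wrapper functions are hypothetical, only the "DCVDD" supply name comes from the patch:

    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    static int example_get_dcvdd(struct device *dev, struct regulator **reg)
    {
            /* not devm: DCVDD may be supplied by a child removed before devres runs */
            *reg = regulator_get(dev, "DCVDD");
            if (IS_ERR(*reg))
                    return PTR_ERR(*reg);
            return 0;
    }

    static void example_put_dcvdd(struct regulator *reg)
    {
            regulator_put(reg);     /* must mirror every successful get */
    }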
diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c
index beccb790c9ba..9d4156fb082a 100644
--- a/drivers/mfd/arizona-i2c.c
+++ b/drivers/mfd/arizona-i2c.c
@@ -24,11 +24,12 @@
#include "arizona.h"
static int arizona_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct arizona *arizona;
const struct regmap_config *regmap_config;
- int ret, type;
+ unsigned long type;
+ int ret;
if (i2c->dev.of_node)
type = arizona_of_get_type(&i2c->dev);
diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c
index 17102f589100..d420dbc0e2b0 100644
--- a/drivers/mfd/arizona-irq.c
+++ b/drivers/mfd/arizona-irq.c
@@ -188,24 +188,33 @@ int arizona_irq_init(struct arizona *arizona)
int flags = IRQF_ONESHOT;
int ret, i;
const struct regmap_irq_chip *aod, *irq;
- bool ctrlif_error = true;
struct irq_data *irq_data;
+ arizona->ctrlif_error = true;
+
switch (arizona->type) {
#ifdef CONFIG_MFD_WM5102
case WM5102:
aod = &wm5102_aod;
irq = &wm5102_irq;
- ctrlif_error = false;
+ arizona->ctrlif_error = false;
break;
#endif
#ifdef CONFIG_MFD_WM5110
case WM5110:
aod = &wm5110_aod;
- irq = &wm5110_irq;
- ctrlif_error = false;
+ switch (arizona->rev) {
+ case 0 ... 2:
+ irq = &wm5110_irq;
+ break;
+ default:
+ irq = &wm5110_revd_irq;
+ break;
+ }
+
+ arizona->ctrlif_error = false;
break;
#endif
#ifdef CONFIG_MFD_WM8997
@@ -213,7 +222,7 @@ int arizona_irq_init(struct arizona *arizona)
aod = &wm8997_aod;
irq = &wm8997_irq;
- ctrlif_error = false;
+ arizona->ctrlif_error = false;
break;
#endif
default:
@@ -300,7 +309,7 @@ int arizona_irq_init(struct arizona *arizona)
}
/* Handle control interface errors in the core */
- if (ctrlif_error) {
+ if (arizona->ctrlif_error) {
i = arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR);
ret = request_threaded_irq(i, NULL, arizona_ctrlif_err,
IRQF_ONESHOT,
@@ -345,7 +354,9 @@ int arizona_irq_init(struct arizona *arizona)
return 0;
err_main_irq:
- free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), arizona);
+ if (arizona->ctrlif_error)
+ free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR),
+ arizona);
err_ctrlif:
free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
err_boot_done:
@@ -361,7 +372,9 @@ err:
int arizona_irq_exit(struct arizona *arizona)
{
- free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR), arizona);
+ if (arizona->ctrlif_error)
+ free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_CTRLIF_ERR),
+ arizona);
free_irq(arizona_map_irq(arizona, ARIZONA_IRQ_BOOT_DONE), arizona);
regmap_del_irq_chip(irq_create_mapping(arizona->virq, 1),
arizona->irq_chip);
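
A tiny sketch of the symmetry restored above: free the control-interface error IRQ only on the paths where it was actually requested (the flag name here is illustrative):

    #include <linux/interrupt.h>

    static void example_teardown_ctrlif_irq(unsigned int irq, void *dev_id,
                                            bool requested)
    {
            if (requested)
                    free_irq(irq, dev_id);  /* skip when it was never requested */
    }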
diff --git a/drivers/mfd/arizona-spi.c b/drivers/mfd/arizona-spi.c
index 1ca554b18bef..5145d78bf07e 100644
--- a/drivers/mfd/arizona-spi.c
+++ b/drivers/mfd/arizona-spi.c
@@ -28,7 +28,8 @@ static int arizona_spi_probe(struct spi_device *spi)
const struct spi_device_id *id = spi_get_device_id(spi);
struct arizona *arizona;
const struct regmap_config *regmap_config;
- int ret, type;
+ unsigned long type;
+ int ret;
if (spi->dev.of_node)
type = arizona_of_get_type(&spi->dev);
diff --git a/drivers/mfd/arizona.h b/drivers/mfd/arizona.h
index b4cef777df73..fbe2843271c5 100644
--- a/drivers/mfd/arizona.h
+++ b/drivers/mfd/arizona.h
@@ -36,6 +36,7 @@ extern const struct regmap_irq_chip wm5102_irq;
extern const struct regmap_irq_chip wm5110_aod;
extern const struct regmap_irq_chip wm5110_irq;
+extern const struct regmap_irq_chip wm5110_revd_irq;
extern const struct regmap_irq_chip wm8997_aod;
extern const struct regmap_irq_chip wm8997_irq;
@@ -46,9 +47,9 @@ int arizona_irq_init(struct arizona *arizona);
int arizona_irq_exit(struct arizona *arizona);
#ifdef CONFIG_OF
-int arizona_of_get_type(struct device *dev);
+unsigned long arizona_of_get_type(struct device *dev);
#else
-static inline int arizona_of_get_type(struct device *dev)
+static inline unsigned long arizona_of_get_type(struct device *dev)
{
return 0;
}
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 9f6294f2a070..9fc4186d4132 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -899,13 +899,15 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
ds1wm_resources[0].end >>= asic->bus_shift;
/* MMC */
- asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
+ if (mem_sdio) {
+ asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
mem_sdio->start,
ASIC3_SD_CONFIG_SIZE >> asic->bus_shift);
- if (!asic->tmio_cnf) {
- ret = -ENOMEM;
- dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n");
- goto out;
+ if (!asic->tmio_cnf) {
+ ret = -ENOMEM;
+ dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n");
+ goto out;
+ }
}
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 38fe9bf0d169..4873f9c50452 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -25,64 +25,42 @@
#include <linux/mfd/cros_ec_commands.h>
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
- struct cros_ec_msg *msg)
+ struct cros_ec_command *msg)
{
uint8_t *out;
int csum, i;
- BUG_ON(msg->out_len > EC_PROTO2_MAX_PARAM_SIZE);
+ BUG_ON(msg->outsize > EC_PROTO2_MAX_PARAM_SIZE);
out = ec_dev->dout;
out[0] = EC_CMD_VERSION0 + msg->version;
- out[1] = msg->cmd;
- out[2] = msg->out_len;
+ out[1] = msg->command;
+ out[2] = msg->outsize;
csum = out[0] + out[1] + out[2];
- for (i = 0; i < msg->out_len; i++)
- csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->out_buf[i];
- out[EC_MSG_TX_HEADER_BYTES + msg->out_len] = (uint8_t)(csum & 0xff);
+ for (i = 0; i < msg->outsize; i++)
+ csum += out[EC_MSG_TX_HEADER_BYTES + i] = msg->outdata[i];
+ out[EC_MSG_TX_HEADER_BYTES + msg->outsize] = (uint8_t)(csum & 0xff);
- return EC_MSG_TX_PROTO_BYTES + msg->out_len;
+ return EC_MSG_TX_PROTO_BYTES + msg->outsize;
}
EXPORT_SYMBOL(cros_ec_prepare_tx);
-static int cros_ec_command_sendrecv(struct cros_ec_device *ec_dev,
- uint16_t cmd, void *out_buf, int out_len,
- void *in_buf, int in_len)
+int cros_ec_check_result(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
{
- struct cros_ec_msg msg;
-
- msg.version = cmd >> 8;
- msg.cmd = cmd & 0xff;
- msg.out_buf = out_buf;
- msg.out_len = out_len;
- msg.in_buf = in_buf;
- msg.in_len = in_len;
-
- return ec_dev->command_xfer(ec_dev, &msg);
-}
-
-static int cros_ec_command_recv(struct cros_ec_device *ec_dev,
- uint16_t cmd, void *buf, int buf_len)
-{
- return cros_ec_command_sendrecv(ec_dev, cmd, NULL, 0, buf, buf_len);
-}
-
-static int cros_ec_command_send(struct cros_ec_device *ec_dev,
- uint16_t cmd, void *buf, int buf_len)
-{
- return cros_ec_command_sendrecv(ec_dev, cmd, buf, buf_len, NULL, 0);
-}
-
-static irqreturn_t ec_irq_thread(int irq, void *data)
-{
- struct cros_ec_device *ec_dev = data;
-
- if (device_may_wakeup(ec_dev->dev))
- pm_wakeup_event(ec_dev->dev, 0);
-
- blocking_notifier_call_chain(&ec_dev->event_notifier, 1, ec_dev);
-
- return IRQ_HANDLED;
+ switch (msg->result) {
+ case EC_RES_SUCCESS:
+ return 0;
+ case EC_RES_IN_PROGRESS:
+ dev_dbg(ec_dev->dev, "command 0x%02x in progress\n",
+ msg->command);
+ return -EAGAIN;
+ default:
+ dev_dbg(ec_dev->dev, "command 0x%02x returned %d\n",
+ msg->command, msg->result);
+ return 0;
+ }
}
+EXPORT_SYMBOL(cros_ec_check_result);
static const struct mfd_cell cros_devs[] = {
{
@@ -102,12 +80,6 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
struct device *dev = ec_dev->dev;
int err = 0;
- BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier);
-
- ec_dev->command_send = cros_ec_command_send;
- ec_dev->command_recv = cros_ec_command_recv;
- ec_dev->command_sendrecv = cros_ec_command_sendrecv;
-
if (ec_dev->din_size) {
ec_dev->din = devm_kzalloc(dev, ec_dev->din_size, GFP_KERNEL);
if (!ec_dev->din)
@@ -119,42 +91,23 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
return -ENOMEM;
}
- if (!ec_dev->irq) {
- dev_dbg(dev, "no valid IRQ: %d\n", ec_dev->irq);
- return err;
- }
-
- err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT,
- "chromeos-ec", ec_dev);
- if (err) {
- dev_err(dev, "request irq %d: error %d\n", ec_dev->irq, err);
- return err;
- }
-
err = mfd_add_devices(dev, 0, cros_devs,
ARRAY_SIZE(cros_devs),
NULL, ec_dev->irq, NULL);
if (err) {
dev_err(dev, "failed to add mfd devices\n");
- goto fail_mfd;
+ return err;
}
- dev_info(dev, "Chrome EC (%s)\n", ec_dev->name);
+ dev_info(dev, "Chrome EC device registered\n");
return 0;
-
-fail_mfd:
- free_irq(ec_dev->irq, ec_dev);
-
- return err;
}
EXPORT_SYMBOL(cros_ec_register);
int cros_ec_remove(struct cros_ec_device *ec_dev)
{
mfd_remove_devices(ec_dev->dev);
- free_irq(ec_dev->irq, ec_dev);
return 0;
}
diff --git a/drivers/mfd/cros_ec_i2c.c b/drivers/mfd/cros_ec_i2c.c
index 4f71be99a183..c0c30f4f946f 100644
--- a/drivers/mfd/cros_ec_i2c.c
+++ b/drivers/mfd/cros_ec_i2c.c
@@ -29,12 +29,13 @@ static inline struct cros_ec_device *to_ec_dev(struct device *dev)
return i2c_get_clientdata(client);
}
-static int cros_ec_command_xfer(struct cros_ec_device *ec_dev,
- struct cros_ec_msg *msg)
+static int cros_ec_cmd_xfer_i2c(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *msg)
{
struct i2c_client *client = ec_dev->priv;
int ret = -ENOMEM;
int i;
+ int len;
int packet_len;
u8 *out_buf = NULL;
u8 *in_buf = NULL;
@@ -50,7 +51,7 @@ static int cros_ec_command_xfer(struct cros_ec_device *ec_dev,
* allocate larger packet (one byte for checksum, one byte for
* length, and one for result code)
*/
- packet_len = msg->in_len + 3;
+ packet_len = msg->insize + 3;
in_buf = kzalloc(packet_len, GFP_KERNEL);
if (!in_buf)
goto done;
@@ -61,7 +62,7 @@ static int cros_ec_command_xfer(struct cros_ec_device *ec_dev,
* allocate larger packet (one byte for checksum, one for
* command code, one for length, and one for command version)
*/
- packet_len = msg->out_len + 4;
+ packet_len = msg->outsize + 4;
out_buf = kzalloc(packet_len, GFP_KERNEL);
if (!out_buf)
goto done;
@@ -69,16 +70,16 @@ static int cros_ec_command_xfer(struct cros_ec_device *ec_dev,
i2c_msg[0].buf = (char *)out_buf;
out_buf[0] = EC_CMD_VERSION0 + msg->version;
- out_buf[1] = msg->cmd;
- out_buf[2] = msg->out_len;
+ out_buf[1] = msg->command;
+ out_buf[2] = msg->outsize;
/* copy message payload and compute checksum */
sum = out_buf[0] + out_buf[1] + out_buf[2];
- for (i = 0; i < msg->out_len; i++) {
- out_buf[3 + i] = msg->out_buf[i];
+ for (i = 0; i < msg->outsize; i++) {
+ out_buf[3 + i] = msg->outdata[i];
sum += out_buf[3 + i];
}
- out_buf[3 + msg->out_len] = sum;
+ out_buf[3 + msg->outsize] = sum;
/* send command to EC and read answer */
ret = i2c_transfer(client->adapter, i2c_msg, 2);
@@ -92,28 +93,34 @@ static int cros_ec_command_xfer(struct cros_ec_device *ec_dev,
}
/* check response error code */
- if (i2c_msg[1].buf[0]) {
- dev_warn(ec_dev->dev, "command 0x%02x returned an error %d\n",
- msg->cmd, i2c_msg[1].buf[0]);
- ret = -EINVAL;
+ msg->result = i2c_msg[1].buf[0];
+ ret = cros_ec_check_result(ec_dev, msg);
+ if (ret)
+ goto done;
+
+ len = in_buf[1];
+ if (len > msg->insize) {
+ dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
+ len, msg->insize);
+ ret = -ENOSPC;
goto done;
}
/* copy response packet payload and compute checksum */
sum = in_buf[0] + in_buf[1];
- for (i = 0; i < msg->in_len; i++) {
- msg->in_buf[i] = in_buf[2 + i];
+ for (i = 0; i < len; i++) {
+ msg->indata[i] = in_buf[2 + i];
sum += in_buf[2 + i];
}
dev_dbg(ec_dev->dev, "packet: %*ph, sum = %02x\n",
i2c_msg[1].len, in_buf, sum);
- if (sum != in_buf[2 + msg->in_len]) {
+ if (sum != in_buf[2 + len]) {
dev_err(ec_dev->dev, "bad packet checksum\n");
ret = -EBADMSG;
goto done;
}
- ret = 0;
+ ret = len;
done:
kfree(in_buf);
kfree(out_buf);
@@ -132,11 +139,10 @@ static int cros_ec_i2c_probe(struct i2c_client *client,
return -ENOMEM;
i2c_set_clientdata(client, ec_dev);
- ec_dev->name = "I2C";
ec_dev->dev = dev;
ec_dev->priv = client;
ec_dev->irq = client->irq;
- ec_dev->command_xfer = cros_ec_command_xfer;
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_i2c;
ec_dev->ec_name = client->name;
ec_dev->phys_name = client->adapter->name;
ec_dev->parent = &client->dev;
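
A short sketch of the protocol-2 framing handled above: three header bytes (version, command, payload size), the payload, then an additive 8-bit checksum; the helper itself is illustrative only:

    #include <linux/types.h>

    static u8 example_proto2_csum(const u8 *frame, int payload_len)
    {
            int i, sum = 0;

            for (i = 0; i < 3 + payload_len; i++)
                    sum += frame[i];        /* header bytes plus payload */
            return (u8)(sum & 0xff);
    }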
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 8c1c7cc373f8..588c700af39c 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -73,7 +73,7 @@
* if no record
* @end_of_msg_delay: used to set the delay_usecs on the spi_transfer that
* is sent when we want to turn off CS at the end of a transaction.
- * @lock: mutex to ensure only one user of cros_ec_command_spi_xfer at a time
+ * @lock: mutex to ensure only one user of cros_ec_cmd_xfer_spi at a time
*/
struct cros_ec_spi {
struct spi_device *spi;
@@ -210,13 +210,13 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
}
/**
- * cros_ec_command_spi_xfer - Transfer a message over SPI and receive the reply
+ * cros_ec_cmd_xfer_spi - Transfer a message over SPI and receive the reply
*
* @ec_dev: ChromeOS EC device
* @ec_msg: Message to transfer
*/
-static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
- struct cros_ec_msg *ec_msg)
+static int cros_ec_cmd_xfer_spi(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
{
struct cros_ec_spi *ec_spi = ec_dev->priv;
struct spi_transfer trans;
@@ -258,23 +258,19 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
/* Get the response */
if (!ret) {
ret = cros_ec_spi_receive_response(ec_dev,
- ec_msg->in_len + EC_MSG_TX_PROTO_BYTES);
+ ec_msg->insize + EC_MSG_TX_PROTO_BYTES);
} else {
dev_err(ec_dev->dev, "spi transfer failed: %d\n", ret);
}
- /* turn off CS */
+ /*
+ * Turn off CS, possibly adding a delay to ensure the rising edge
+ * doesn't come too soon after the end of the data.
+ */
spi_message_init(&msg);
-
- if (ec_spi->end_of_msg_delay) {
- /*
- * Add delay for last transaction, to ensure the rising edge
- * doesn't come too soon after the end of the data.
- */
- memset(&trans, 0, sizeof(trans));
- trans.delay_usecs = ec_spi->end_of_msg_delay;
- spi_message_add_tail(&trans, &msg);
- }
+ memset(&trans, 0, sizeof(trans));
+ trans.delay_usecs = ec_spi->end_of_msg_delay;
+ spi_message_add_tail(&trans, &msg);
final_ret = spi_sync(ec_spi->spi, &msg);
ec_spi->last_transfer_ns = ktime_get_ns();
@@ -285,20 +281,19 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
goto exit;
}
- /* check response error code */
ptr = ec_dev->din;
- if (ptr[0]) {
- dev_warn(ec_dev->dev, "command 0x%02x returned an error %d\n",
- ec_msg->cmd, ptr[0]);
- debug_packet(ec_dev->dev, "in_err", ptr, len);
- ret = -EINVAL;
+
+ /* check response error code */
+ ec_msg->result = ptr[0];
+ ret = cros_ec_check_result(ec_dev, ec_msg);
+ if (ret)
goto exit;
- }
+
len = ptr[1];
sum = ptr[0] + ptr[1];
- if (len > ec_msg->in_len) {
+ if (len > ec_msg->insize) {
dev_err(ec_dev->dev, "packet too long (%d bytes, expected %d)",
- len, ec_msg->in_len);
+ len, ec_msg->insize);
ret = -ENOSPC;
goto exit;
}
@@ -306,8 +301,8 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
/* copy response packet payload and compute checksum */
for (i = 0; i < len; i++) {
sum += ptr[i + 2];
- if (ec_msg->in_len)
- ec_msg->in_buf[i] = ptr[i + 2];
+ if (ec_msg->insize)
+ ec_msg->indata[i] = ptr[i + 2];
}
sum &= 0xff;
@@ -321,7 +316,7 @@ static int cros_ec_command_spi_xfer(struct cros_ec_device *ec_dev,
goto exit;
}
- ret = 0;
+ ret = len;
exit:
mutex_unlock(&ec_spi->lock);
return ret;
@@ -364,11 +359,10 @@ static int cros_ec_spi_probe(struct spi_device *spi)
cros_ec_spi_dt_probe(ec_spi, dev);
spi_set_drvdata(spi, ec_dev);
- ec_dev->name = "SPI";
ec_dev->dev = dev;
ec_dev->priv = ec_spi;
ec_dev->irq = spi->irq;
- ec_dev->command_xfer = cros_ec_command_spi_xfer;
+ ec_dev->cmd_xfer = cros_ec_cmd_xfer_spi;
ec_dev->ec_name = ec_spi->spi->modalias;
ec_dev->phys_name = dev_name(&ec_spi->spi->dev);
ec_dev->parent = &ec_spi->spi->dev;
@@ -381,6 +375,8 @@ static int cros_ec_spi_probe(struct spi_device *spi)
return err;
}
+ device_init_wakeup(&spi->dev, true);
+
return 0;
}
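
To make the cros_ec_spi.c response handling above easier to follow, here is a small self-contained C sketch of the frame walk the reworked transfer performs: byte 0 carries the EC result code, byte 1 the payload length, the payload follows, and the byte after the payload is taken to be an 8-bit additive checksum. The function name, the trailing-checksum assumption and the test vector are illustrative only and are not taken from the driver.

#include <stdint.h>
#include <stddef.h>

/* Returns the payload length on success, -1 on a malformed frame. */
static int ec_frame_payload_len(const uint8_t *frame, size_t frame_len,
				size_t max_payload)
{
	unsigned int sum;
	uint8_t len;
	size_t i;

	if (frame_len < 3)
		return -1;		/* need result, length and checksum */

	len = frame[1];
	if (len > max_payload || (size_t)len + 3 > frame_len)
		return -1;		/* the "packet too long" case above */

	sum = frame[0] + frame[1];	/* result + length, as in the hunk */
	for (i = 0; i < len; i++)
		sum += frame[i + 2];	/* payload bytes */

	if ((sum & 0xff) != frame[len + 2])
		return -1;		/* checksum mismatch */

	return len;			/* mirrors the "ret = len" change */
}

int main(void)
{
	/* result 0x00, len 2, payload {0x10, 0x20}, checksum 0x32 */
	const uint8_t frame[] = { 0x00, 0x02, 0x10, 0x20, 0x32 };

	return ec_frame_payload_len(frame, sizeof(frame), 16) == 2 ? 0 : 1;
}
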
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c
index e70ae315abc7..93db8bb8c8f0 100644
--- a/drivers/mfd/da9063-core.c
+++ b/drivers/mfd/da9063-core.c
@@ -153,9 +153,9 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq)
"Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n",
model, variant_id);
- if (variant_code != PMIC_DA9063_BB) {
- dev_err(da9063->dev, "Unknown chip variant code: 0x%02X\n",
- variant_code);
+ if (variant_code < PMIC_DA9063_BB && variant_code != PMIC_DA9063_AD) {
+ dev_err(da9063->dev,
+ "Cannot support variant code: 0x%02X\n", variant_code);
return -ENODEV;
}
diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c
index 8db5c805c64f..21fd8d9a217b 100644
--- a/drivers/mfd/da9063-i2c.c
+++ b/drivers/mfd/da9063-i2c.c
@@ -25,10 +25,10 @@
#include <linux/mfd/da9063/pdata.h>
#include <linux/mfd/da9063/registers.h>
-static const struct regmap_range da9063_readable_ranges[] = {
+static const struct regmap_range da9063_ad_readable_ranges[] = {
{
.range_min = DA9063_REG_PAGE_CON,
- .range_max = DA9063_REG_SECOND_D,
+ .range_max = DA9063_AD_REG_SECOND_D,
}, {
.range_min = DA9063_REG_SEQ,
.range_max = DA9063_REG_ID_32_31,
@@ -37,14 +37,14 @@ static const struct regmap_range da9063_readable_ranges[] = {
.range_max = DA9063_REG_AUTO3_LOW,
}, {
.range_min = DA9063_REG_T_OFFSET,
- .range_max = DA9063_REG_GP_ID_19,
+ .range_max = DA9063_AD_REG_GP_ID_19,
}, {
.range_min = DA9063_REG_CHIP_ID,
.range_max = DA9063_REG_CHIP_VARIANT,
},
};
-static const struct regmap_range da9063_writeable_ranges[] = {
+static const struct regmap_range da9063_ad_writeable_ranges[] = {
{
.range_min = DA9063_REG_PAGE_CON,
.range_max = DA9063_REG_PAGE_CON,
@@ -53,7 +53,7 @@ static const struct regmap_range da9063_writeable_ranges[] = {
.range_max = DA9063_REG_VSYS_MON,
}, {
.range_min = DA9063_REG_COUNT_S,
- .range_max = DA9063_REG_ALARM_Y,
+ .range_max = DA9063_AD_REG_ALARM_Y,
}, {
.range_min = DA9063_REG_SEQ,
.range_max = DA9063_REG_ID_32_31,
@@ -62,14 +62,14 @@ static const struct regmap_range da9063_writeable_ranges[] = {
.range_max = DA9063_REG_AUTO3_LOW,
}, {
.range_min = DA9063_REG_CONFIG_I,
- .range_max = DA9063_REG_MON_REG_4,
+ .range_max = DA9063_AD_REG_MON_REG_4,
}, {
- .range_min = DA9063_REG_GP_ID_0,
- .range_max = DA9063_REG_GP_ID_19,
+ .range_min = DA9063_AD_REG_GP_ID_0,
+ .range_max = DA9063_AD_REG_GP_ID_19,
},
};
-static const struct regmap_range da9063_volatile_ranges[] = {
+static const struct regmap_range da9063_ad_volatile_ranges[] = {
{
.range_min = DA9063_REG_STATUS_A,
.range_max = DA9063_REG_EVENT_D,
@@ -81,26 +81,104 @@ static const struct regmap_range da9063_volatile_ranges[] = {
.range_max = DA9063_REG_ADC_MAN,
}, {
.range_min = DA9063_REG_ADC_RES_L,
- .range_max = DA9063_REG_SECOND_D,
+ .range_max = DA9063_AD_REG_SECOND_D,
}, {
- .range_min = DA9063_REG_MON_REG_5,
- .range_max = DA9063_REG_MON_REG_6,
+ .range_min = DA9063_AD_REG_MON_REG_5,
+ .range_max = DA9063_AD_REG_MON_REG_6,
},
};
-static const struct regmap_access_table da9063_readable_table = {
- .yes_ranges = da9063_readable_ranges,
- .n_yes_ranges = ARRAY_SIZE(da9063_readable_ranges),
+static const struct regmap_access_table da9063_ad_readable_table = {
+ .yes_ranges = da9063_ad_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_ad_readable_ranges),
};
-static const struct regmap_access_table da9063_writeable_table = {
- .yes_ranges = da9063_writeable_ranges,
- .n_yes_ranges = ARRAY_SIZE(da9063_writeable_ranges),
+static const struct regmap_access_table da9063_ad_writeable_table = {
+ .yes_ranges = da9063_ad_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_ad_writeable_ranges),
};
-static const struct regmap_access_table da9063_volatile_table = {
- .yes_ranges = da9063_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(da9063_volatile_ranges),
+static const struct regmap_access_table da9063_ad_volatile_table = {
+ .yes_ranges = da9063_ad_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_ad_volatile_ranges),
+};
+
+static const struct regmap_range da9063_bb_readable_ranges[] = {
+ {
+ .range_min = DA9063_REG_PAGE_CON,
+ .range_max = DA9063_BB_REG_SECOND_D,
+ }, {
+ .range_min = DA9063_REG_SEQ,
+ .range_max = DA9063_REG_ID_32_31,
+ }, {
+ .range_min = DA9063_REG_SEQ_A,
+ .range_max = DA9063_REG_AUTO3_LOW,
+ }, {
+ .range_min = DA9063_REG_T_OFFSET,
+ .range_max = DA9063_BB_REG_GP_ID_19,
+ }, {
+ .range_min = DA9063_REG_CHIP_ID,
+ .range_max = DA9063_REG_CHIP_VARIANT,
+ },
+};
+
+static const struct regmap_range da9063_bb_writeable_ranges[] = {
+ {
+ .range_min = DA9063_REG_PAGE_CON,
+ .range_max = DA9063_REG_PAGE_CON,
+ }, {
+ .range_min = DA9063_REG_FAULT_LOG,
+ .range_max = DA9063_REG_VSYS_MON,
+ }, {
+ .range_min = DA9063_REG_COUNT_S,
+ .range_max = DA9063_BB_REG_ALARM_Y,
+ }, {
+ .range_min = DA9063_REG_SEQ,
+ .range_max = DA9063_REG_ID_32_31,
+ }, {
+ .range_min = DA9063_REG_SEQ_A,
+ .range_max = DA9063_REG_AUTO3_LOW,
+ }, {
+ .range_min = DA9063_REG_CONFIG_I,
+ .range_max = DA9063_BB_REG_MON_REG_4,
+ }, {
+ .range_min = DA9063_BB_REG_GP_ID_0,
+ .range_max = DA9063_BB_REG_GP_ID_19,
+ },
+};
+
+static const struct regmap_range da9063_bb_volatile_ranges[] = {
+ {
+ .range_min = DA9063_REG_STATUS_A,
+ .range_max = DA9063_REG_EVENT_D,
+ }, {
+ .range_min = DA9063_REG_CONTROL_F,
+ .range_max = DA9063_REG_CONTROL_F,
+ }, {
+ .range_min = DA9063_REG_ADC_MAN,
+ .range_max = DA9063_REG_ADC_MAN,
+ }, {
+ .range_min = DA9063_REG_ADC_RES_L,
+ .range_max = DA9063_BB_REG_SECOND_D,
+ }, {
+ .range_min = DA9063_BB_REG_MON_REG_5,
+ .range_max = DA9063_BB_REG_MON_REG_6,
+ },
+};
+
+static const struct regmap_access_table da9063_bb_readable_table = {
+ .yes_ranges = da9063_bb_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_bb_readable_ranges),
+};
+
+static const struct regmap_access_table da9063_bb_writeable_table = {
+ .yes_ranges = da9063_bb_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_bb_writeable_ranges),
+};
+
+static const struct regmap_access_table da9063_bb_volatile_table = {
+ .yes_ranges = da9063_bb_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(da9063_bb_volatile_ranges),
};
static const struct regmap_range_cfg da9063_range_cfg[] = {
@@ -123,10 +201,6 @@ static struct regmap_config da9063_regmap_config = {
.max_register = DA9063_REG_CHIP_VARIANT,
.cache_type = REGCACHE_RBTREE,
-
- .rd_table = &da9063_readable_table,
- .wr_table = &da9063_writeable_table,
- .volatile_table = &da9063_volatile_table,
};
static int da9063_i2c_probe(struct i2c_client *i2c,
@@ -143,6 +217,16 @@ static int da9063_i2c_probe(struct i2c_client *i2c,
da9063->dev = &i2c->dev;
da9063->chip_irq = i2c->irq;
+ if (da9063->variant_code == PMIC_DA9063_AD) {
+ da9063_regmap_config.rd_table = &da9063_ad_readable_table;
+ da9063_regmap_config.wr_table = &da9063_ad_writeable_table;
+ da9063_regmap_config.volatile_table = &da9063_ad_volatile_table;
+ } else {
+ da9063_regmap_config.rd_table = &da9063_bb_readable_table;
+ da9063_regmap_config.wr_table = &da9063_bb_writeable_table;
+ da9063_regmap_config.volatile_table = &da9063_bb_volatile_table;
+ }
+
da9063->regmap = devm_regmap_init_i2c(i2c, &da9063_regmap_config);
if (IS_ERR(da9063->regmap)) {
ret = PTR_ERR(da9063->regmap);
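
The probe-time branch above swaps the whole set of regmap access tables depending on whether the silicon is the older AD variant or BB and later. Reduced to plain C, the selection pattern looks like the sketch below; the enum values and table contents are placeholders, not the real DA9063 register ranges.

#include <stdio.h>

enum da9063_variant { VARIANT_AD, VARIANT_BB };	/* stand-in values */

struct access_tables {
	const char *rd, *wr, *vol;	/* placeholders for the regmap tables */
};

static const struct access_tables ad_tables = { "ad_rd", "ad_wr", "ad_vol" };
static const struct access_tables bb_tables = { "bb_rd", "bb_wr", "bb_vol" };

/* AD silicon gets its own layout; everything else uses the BB tables. */
static const struct access_tables *pick_tables(enum da9063_variant v)
{
	return (v == VARIANT_AD) ? &ad_tables : &bb_tables;
}

int main(void)
{
	printf("read table: %s\n", pick_tables(VARIANT_BB)->rd);
	return 0;
}
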
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 7a55c0071fa8..4c826f78acd0 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(dm355evm_msp_read);
* Many of the msp430 pins are just used as fixed-direction GPIOs.
* We could export a few more of them this way, if we wanted.
*/
-#define MSP_GPIO(bit,reg) ((DM355EVM_MSP_ ## reg) << 3 | (bit))
+#define MSP_GPIO(bit, reg) ((DM355EVM_MSP_ ## reg) << 3 | (bit))
static const u8 msp_gpios[] = {
/* eight leds */
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 2ed774e7d342..5991faddd3c6 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -62,7 +62,7 @@ static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
struct spi_message m;
int status;
- memset(&t, 0, sizeof t);
+ memset(&t, 0, sizeof(t));
spi_message_init(&m);
t.len = sizeof(u32);
spi_message_add_tail(&t, &m);
@@ -211,7 +211,6 @@ static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
desc->irq_data.chip->irq_ack(&desc->irq_data);
queue_work(pcap->workqueue, &pcap->isr_work);
- return;
}
/* ADC */
diff --git a/drivers/mfd/htc-i2cpld.c b/drivers/mfd/htc-i2cpld.c
index d7b2a75aca3e..b44f0203983b 100644
--- a/drivers/mfd/htc-i2cpld.c
+++ b/drivers/mfd/htc-i2cpld.c
@@ -332,18 +332,13 @@ static int htcpld_setup_chip_irq(
int chip_index)
{
struct htcpld_data *htcpld;
- struct device *dev = &pdev->dev;
- struct htcpld_core_platform_data *pdata;
struct htcpld_chip *chip;
- struct htcpld_chip_platform_data *plat_chip_data;
unsigned int irq, irq_end;
int ret = 0;
/* Get the platform and driver data */
- pdata = dev_get_platdata(dev);
htcpld = platform_get_drvdata(pdev);
chip = &htcpld->chip[chip_index];
- plat_chip_data = &pdata->chip[chip_index];
/* Setup irq handlers */
irq_end = chip->irq_start + chip->nirqs;
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 049fd23af54a..443e7cddff28 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -27,7 +27,7 @@
/*
* MSIC interrupt tree is readable from SRAM at INTEL_MSIC_IRQ_PHYS_BASE.
- * Since IRQ block starts from address 0x002 we need to substract that from
+ * Since IRQ block starts from address 0x002 we need to subtract that from
* the actual IRQ status register address.
*/
#define MSIC_IRQ_STATUS(x) (INTEL_MSIC_IRQ_PHYS_BASE + ((x) - 2))
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
new file mode 100644
index 000000000000..2720922f90b4
--- /dev/null
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -0,0 +1,170 @@
+/*
+ * intel_soc_pmic_core.c - Intel SoC PMIC MFD Driver
+ *
+ * Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Yang, Bin <bin.yang@intel.com>
+ * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/gpio/consumer.h>
+#include <linux/acpi.h>
+#include <linux/regmap.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include "intel_soc_pmic_core.h"
+
+/*
+ * On some boards the PMIC interrupt may come from a GPIO line.
+ * Try to look up the ACPI tables and see if such a connection exists. If
+ * not, return -ENOENT so the caller falls back to the IRQ provided by I2C.
+ */
+static int intel_soc_pmic_find_gpio_irq(struct device *dev)
+{
+ struct gpio_desc *desc;
+ int irq;
+
+ desc = devm_gpiod_get_index(dev, "intel_soc_pmic", 0);
+ if (IS_ERR(desc))
+ return -ENOENT;
+
+ irq = gpiod_to_irq(desc);
+ if (irq < 0)
+ dev_warn(dev, "Can't get irq: %d\n", irq);
+
+ return irq;
+}
+
+static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *i2c_id)
+{
+ struct device *dev = &i2c->dev;
+ const struct acpi_device_id *id;
+ struct intel_soc_pmic_config *config;
+ struct intel_soc_pmic *pmic;
+ int ret;
+ int irq;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id || !id->driver_data)
+ return -ENODEV;
+
+ config = (struct intel_soc_pmic_config *)id->driver_data;
+
+ pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL);
+ dev_set_drvdata(dev, pmic);
+
+ pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config);
+
+ irq = intel_soc_pmic_find_gpio_irq(dev);
+ pmic->irq = (irq < 0) ? i2c->irq : irq;
+
+ ret = regmap_add_irq_chip(pmic->regmap, pmic->irq,
+ config->irq_flags | IRQF_ONESHOT,
+ 0, config->irq_chip,
+ &pmic->irq_chip_data);
+ if (ret)
+ return ret;
+
+ ret = enable_irq_wake(pmic->irq);
+ if (ret)
+ dev_warn(dev, "Can't enable IRQ as wake source: %d\n", ret);
+
+ ret = mfd_add_devices(dev, -1, config->cell_dev,
+ config->n_cell_devs, NULL, 0,
+ regmap_irq_get_domain(pmic->irq_chip_data));
+ if (ret)
+ goto err_del_irq_chip;
+
+ return 0;
+
+err_del_irq_chip:
+ regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
+ return ret;
+}
+
+static int intel_soc_pmic_i2c_remove(struct i2c_client *i2c)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(&i2c->dev);
+
+ regmap_del_irq_chip(pmic->irq, pmic->irq_chip_data);
+
+ mfd_remove_devices(&i2c->dev);
+
+ return 0;
+}
+
+static void intel_soc_pmic_shutdown(struct i2c_client *i2c)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(&i2c->dev);
+
+ disable_irq(pmic->irq);
+
+ return;
+}
+
+static int intel_soc_pmic_suspend(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ disable_irq(pmic->irq);
+
+ return 0;
+}
+
+static int intel_soc_pmic_resume(struct device *dev)
+{
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev);
+
+ enable_irq(pmic->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(intel_soc_pmic_pm_ops, intel_soc_pmic_suspend,
+ intel_soc_pmic_resume);
+
+static const struct i2c_device_id intel_soc_pmic_i2c_id[] = {
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, intel_soc_pmic_i2c_id);
+
+#if defined(CONFIG_ACPI)
+static struct acpi_device_id intel_soc_pmic_acpi_match[] = {
+ {"INT33FD", (kernel_ulong_t)&intel_soc_pmic_config_crc},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, intel_soc_pmic_acpi_match);
+#endif
+
+static struct i2c_driver intel_soc_pmic_i2c_driver = {
+ .driver = {
+ .name = "intel_soc_pmic_i2c",
+ .owner = THIS_MODULE,
+ .pm = &intel_soc_pmic_pm_ops,
+ .acpi_match_table = ACPI_PTR(intel_soc_pmic_acpi_match),
+ },
+ .probe = intel_soc_pmic_i2c_probe,
+ .remove = intel_soc_pmic_i2c_remove,
+ .id_table = intel_soc_pmic_i2c_id,
+ .shutdown = intel_soc_pmic_shutdown,
+};
+
+module_i2c_driver(intel_soc_pmic_i2c_driver);
+
+MODULE_DESCRIPTION("I2C driver for Intel SoC PMIC");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yang, Bin <bin.yang@intel.com>");
+MODULE_AUTHOR("Zhu, Lejun <lejun.zhu@linux.intel.com>");
diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h
new file mode 100644
index 000000000000..33aacd9baddc
--- /dev/null
+++ b/drivers/mfd/intel_soc_pmic_core.h
@@ -0,0 +1,32 @@
+/*
+ * intel_soc_pmic_core.h - Intel SoC PMIC MFD Driver
+ *
+ * Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Yang, Bin <bin.yang@intel.com>
+ * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
+ */
+
+#ifndef __INTEL_SOC_PMIC_CORE_H__
+#define __INTEL_SOC_PMIC_CORE_H__
+
+struct intel_soc_pmic_config {
+ unsigned long irq_flags;
+ struct mfd_cell *cell_dev;
+ int n_cell_devs;
+ struct regmap_config *regmap_config;
+ struct regmap_irq_chip *irq_chip;
+};
+
+extern struct intel_soc_pmic_config intel_soc_pmic_config_crc;
+
+#endif /* __INTEL_SOC_PMIC_CORE_H__ */
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
new file mode 100644
index 000000000000..7107cab832e6
--- /dev/null
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -0,0 +1,158 @@
+/*
+ * intel_soc_pmic_crc.c - Device access for Crystal Cove PMIC
+ *
+ * Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Yang, Bin <bin.yang@intel.com>
+ * Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
+ */
+
+#include <linux/mfd/core.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include "intel_soc_pmic_core.h"
+
+#define CRYSTAL_COVE_MAX_REGISTER 0xC6
+
+#define CRYSTAL_COVE_REG_IRQLVL1 0x02
+#define CRYSTAL_COVE_REG_MIRQLVL1 0x0E
+
+#define CRYSTAL_COVE_IRQ_PWRSRC 0
+#define CRYSTAL_COVE_IRQ_THRM 1
+#define CRYSTAL_COVE_IRQ_BCU 2
+#define CRYSTAL_COVE_IRQ_ADC 3
+#define CRYSTAL_COVE_IRQ_CHGR 4
+#define CRYSTAL_COVE_IRQ_GPIO 5
+#define CRYSTAL_COVE_IRQ_VHDMIOCP 6
+
+static struct resource gpio_resources[] = {
+ {
+ .name = "GPIO",
+ .start = CRYSTAL_COVE_IRQ_GPIO,
+ .end = CRYSTAL_COVE_IRQ_GPIO,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource pwrsrc_resources[] = {
+ {
+ .name = "PWRSRC",
+ .start = CRYSTAL_COVE_IRQ_PWRSRC,
+ .end = CRYSTAL_COVE_IRQ_PWRSRC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource adc_resources[] = {
+ {
+ .name = "ADC",
+ .start = CRYSTAL_COVE_IRQ_ADC,
+ .end = CRYSTAL_COVE_IRQ_ADC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource thermal_resources[] = {
+ {
+ .name = "THERMAL",
+ .start = CRYSTAL_COVE_IRQ_THRM,
+ .end = CRYSTAL_COVE_IRQ_THRM,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource bcu_resources[] = {
+ {
+ .name = "BCU",
+ .start = CRYSTAL_COVE_IRQ_BCU,
+ .end = CRYSTAL_COVE_IRQ_BCU,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell crystal_cove_dev[] = {
+ {
+ .name = "crystal_cove_pwrsrc",
+ .num_resources = ARRAY_SIZE(pwrsrc_resources),
+ .resources = pwrsrc_resources,
+ },
+ {
+ .name = "crystal_cove_adc",
+ .num_resources = ARRAY_SIZE(adc_resources),
+ .resources = adc_resources,
+ },
+ {
+ .name = "crystal_cove_thermal",
+ .num_resources = ARRAY_SIZE(thermal_resources),
+ .resources = thermal_resources,
+ },
+ {
+ .name = "crystal_cove_bcu",
+ .num_resources = ARRAY_SIZE(bcu_resources),
+ .resources = bcu_resources,
+ },
+ {
+ .name = "crystal_cove_gpio",
+ .num_resources = ARRAY_SIZE(gpio_resources),
+ .resources = gpio_resources,
+ },
+};
+
+static struct regmap_config crystal_cove_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = CRYSTAL_COVE_MAX_REGISTER,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_irq crystal_cove_irqs[] = {
+ [CRYSTAL_COVE_IRQ_PWRSRC] = {
+ .mask = BIT(CRYSTAL_COVE_IRQ_PWRSRC),
+ },
+ [CRYSTAL_COVE_IRQ_THRM] = {
+ .mask = BIT(CRYSTAL_COVE_IRQ_THRM),
+ },
+ [CRYSTAL_COVE_IRQ_BCU] = {
+ .mask = BIT(CRYSTAL_COVE_IRQ_BCU),
+ },
+ [CRYSTAL_COVE_IRQ_ADC] = {
+ .mask = BIT(CRYSTAL_COVE_IRQ_ADC),
+ },
+ [CRYSTAL_COVE_IRQ_CHGR] = {
+ .mask = BIT(CRYSTAL_COVE_IRQ_CHGR),
+ },
+ [CRYSTAL_COVE_IRQ_GPIO] = {
+ .mask = BIT(CRYSTAL_COVE_IRQ_GPIO),
+ },
+ [CRYSTAL_COVE_IRQ_VHDMIOCP] = {
+ .mask = BIT(CRYSTAL_COVE_IRQ_VHDMIOCP),
+ },
+};
+
+static struct regmap_irq_chip crystal_cove_irq_chip = {
+ .name = "Crystal Cove",
+ .irqs = crystal_cove_irqs,
+ .num_irqs = ARRAY_SIZE(crystal_cove_irqs),
+ .num_regs = 1,
+ .status_base = CRYSTAL_COVE_REG_IRQLVL1,
+ .mask_base = CRYSTAL_COVE_REG_MIRQLVL1,
+};
+
+struct intel_soc_pmic_config intel_soc_pmic_config_crc = {
+ .irq_flags = IRQF_TRIGGER_RISING,
+ .cell_dev = crystal_cove_dev,
+ .n_cell_devs = ARRAY_SIZE(crystal_cove_dev),
+ .regmap_config = &crystal_cove_regmap_config,
+ .irq_chip = &crystal_cove_irq_chip,
+};
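
The Crystal Cove description above uses a single level-1 status/mask pair: bit n of IRQLVL1 (0x02) reports interrupt n and bit n of MIRQLVL1 (0x0E) masks it. The sketch below shows, in a simplified userspace form, the kind of status-and-not-mask decode the regmap-irq core performs for such a layout; decode_pending() and the example register values are made up for illustration.

#include <stdio.h>

static void decode_pending(unsigned int status, unsigned int mask)
{
	unsigned int pending = status & ~mask;	/* masked bits are ignored */
	int bit;

	for (bit = 0; bit < 8; bit++)
		if (pending & (1u << bit))
			printf("first-level irq %d pending\n", bit);
}

int main(void)
{
	/* THRM (1), ADC (3) and GPIO (5) latched in status; ADC masked out */
	decode_pending(0x2a, 0x08);
	return 0;
}
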
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index 7e50fe0118e3..8df3266064e4 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -115,7 +115,7 @@ static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data)
} else {
dev_err(micro->dev,
"out of band RX message 0x%02x\n", id);
- if(!micro->msg)
+ if (!micro->msg)
dev_info(micro->dev, "no message queued\n");
else
dev_info(micro->dev, "expected message %02x\n",
@@ -126,13 +126,13 @@ static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data)
if (micro->key)
micro->key(micro->key_data, len, data);
else
- dev_dbg(micro->dev, "key message ignored, no handle \n");
+ dev_dbg(micro->dev, "key message ignored, no handle\n");
break;
case MSG_TOUCHSCREEN:
if (micro->ts)
micro->ts(micro->ts_data, len, data);
else
- dev_dbg(micro->dev, "touchscreen message ignored, no handle \n");
+ dev_dbg(micro->dev, "touchscreen message ignored, no handle\n");
break;
default:
dev_err(micro->dev,
@@ -154,7 +154,7 @@ static void micro_process_char(struct ipaq_micro *micro, u8 ch)
rx->state = STATE_ID; /* Next byte is the id and len */
break;
case STATE_ID: /* Looking for id and len byte */
- rx->id = (ch & 0xf0) >> 4 ;
+ rx->id = (ch & 0xf0) >> 4;
rx->len = (ch & 0x0f);
rx->index = 0;
rx->chksum = ch;
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index f7ff0188603d..bd2696136eee 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -24,7 +24,8 @@
#define MAX_ID_LEN 4
static char force_device_id[MAX_ID_LEN + 1] = "";
-module_param_string(force_device_id, force_device_id, sizeof(force_device_id), 0);
+module_param_string(force_device_id, force_device_id,
+ sizeof(force_device_id), 0);
MODULE_PARM_DESC(force_device_id, "Override detected product");
/*
@@ -36,7 +37,7 @@ static void kempld_get_hardware_mutex(struct kempld_device_data *pld)
{
/* The mutex bit will read 1 until access has been granted */
while (ioread8(pld->io_index) & KEMPLD_MUTEX_KEY)
- msleep(1);
+ usleep_range(1000, 3000);
}
static void kempld_release_hardware_mutex(struct kempld_device_data *pld)
@@ -499,7 +500,7 @@ static struct platform_driver kempld_driver = {
.remove = kempld_remove,
};
-static struct dmi_system_id __initdata kempld_dmi_table[] = {
+static struct dmi_system_id kempld_dmi_table[] __initdata = {
{
.ident = "BHL6",
.matches = {
@@ -736,7 +737,8 @@ static int __init kempld_init(void)
int ret;
if (force_device_id[0]) {
- for (id = kempld_dmi_table; id->matches[0].slot != DMI_NONE; id++)
+ for (id = kempld_dmi_table;
+ id->matches[0].slot != DMI_NONE; id++)
if (strstr(id->ident, force_device_id))
if (id->callback && id->callback(id))
break;
diff --git a/drivers/mfd/lp8788-irq.c b/drivers/mfd/lp8788-irq.c
index c84ded5f8ece..23982dbf014d 100644
--- a/drivers/mfd/lp8788-irq.c
+++ b/drivers/mfd/lp8788-irq.c
@@ -66,12 +66,14 @@ static inline u8 _irq_to_val(enum lp8788_int_id id, int enable)
static void lp8788_irq_enable(struct irq_data *data)
{
struct lp8788_irq_data *irqd = irq_data_get_irq_chip_data(data);
+
irqd->enabled[data->hwirq] = 1;
}
static void lp8788_irq_disable(struct irq_data *data)
{
struct lp8788_irq_data *irqd = irq_data_get_irq_chip_data(data);
+
irqd->enabled[data->hwirq] = 0;
}
diff --git a/drivers/mfd/max77686-irq.c b/drivers/mfd/max77686-irq.c
deleted file mode 100644
index cdc3280e2ec7..000000000000
--- a/drivers/mfd/max77686-irq.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * max77686-irq.c - Interrupt controller support for MAX77686
- *
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- * Chiwoong Byun <woong.byun@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8997-irq.c
- */
-
-#include <linux/err.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/mfd/max77686.h>
-#include <linux/mfd/max77686-private.h>
-#include <linux/irqdomain.h>
-#include <linux/regmap.h>
-
-enum {
- MAX77686_DEBUG_IRQ_INFO = 1 << 0,
- MAX77686_DEBUG_IRQ_MASK = 1 << 1,
- MAX77686_DEBUG_IRQ_INT = 1 << 2,
-};
-
-static int debug_mask = 0;
-module_param(debug_mask, int, 0);
-MODULE_PARM_DESC(debug_mask, "Set debug_mask : 0x0=off 0x1=IRQ_INFO 0x2=IRQ_MASK 0x4=IRQ_INI)");
-
-static const u8 max77686_mask_reg[] = {
- [PMIC_INT1] = MAX77686_REG_INT1MSK,
- [PMIC_INT2] = MAX77686_REG_INT2MSK,
- [RTC_INT] = MAX77686_RTC_INTM,
-};
-
-static struct regmap *max77686_get_regmap(struct max77686_dev *max77686,
- enum max77686_irq_source src)
-{
- switch (src) {
- case PMIC_INT1 ... PMIC_INT2:
- return max77686->regmap;
- case RTC_INT:
- return max77686->rtc_regmap;
- default:
- return ERR_PTR(-EINVAL);
- }
-}
-
-struct max77686_irq_data {
- int mask;
- enum max77686_irq_source group;
-};
-
-#define DECLARE_IRQ(idx, _group, _mask) \
- [(idx)] = { .group = (_group), .mask = (_mask) }
-static const struct max77686_irq_data max77686_irqs[] = {
- DECLARE_IRQ(MAX77686_PMICIRQ_PWRONF, PMIC_INT1, 1 << 0),
- DECLARE_IRQ(MAX77686_PMICIRQ_PWRONR, PMIC_INT1, 1 << 1),
- DECLARE_IRQ(MAX77686_PMICIRQ_JIGONBF, PMIC_INT1, 1 << 2),
- DECLARE_IRQ(MAX77686_PMICIRQ_JIGONBR, PMIC_INT1, 1 << 3),
- DECLARE_IRQ(MAX77686_PMICIRQ_ACOKBF, PMIC_INT1, 1 << 4),
- DECLARE_IRQ(MAX77686_PMICIRQ_ACOKBR, PMIC_INT1, 1 << 5),
- DECLARE_IRQ(MAX77686_PMICIRQ_ONKEY1S, PMIC_INT1, 1 << 6),
- DECLARE_IRQ(MAX77686_PMICIRQ_MRSTB, PMIC_INT1, 1 << 7),
- DECLARE_IRQ(MAX77686_PMICIRQ_140C, PMIC_INT2, 1 << 0),
- DECLARE_IRQ(MAX77686_PMICIRQ_120C, PMIC_INT2, 1 << 1),
- DECLARE_IRQ(MAX77686_RTCIRQ_RTC60S, RTC_INT, 1 << 0),
- DECLARE_IRQ(MAX77686_RTCIRQ_RTCA1, RTC_INT, 1 << 1),
- DECLARE_IRQ(MAX77686_RTCIRQ_RTCA2, RTC_INT, 1 << 2),
- DECLARE_IRQ(MAX77686_RTCIRQ_SMPL, RTC_INT, 1 << 3),
- DECLARE_IRQ(MAX77686_RTCIRQ_RTC1S, RTC_INT, 1 << 4),
- DECLARE_IRQ(MAX77686_RTCIRQ_WTSR, RTC_INT, 1 << 5),
-};
-
-static void max77686_irq_lock(struct irq_data *data)
-{
- struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
-
- if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
- pr_info("%s\n", __func__);
-
- mutex_lock(&max77686->irqlock);
-}
-
-static void max77686_irq_sync_unlock(struct irq_data *data)
-{
- struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
- int i;
-
- for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++) {
- u8 mask_reg = max77686_mask_reg[i];
- struct regmap *map = max77686_get_regmap(max77686, i);
-
- if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
- pr_debug("%s: mask_reg[%d]=0x%x, cur=0x%x\n",
- __func__, i, mask_reg, max77686->irq_masks_cur[i]);
-
- if (mask_reg == MAX77686_REG_INVALID ||
- IS_ERR_OR_NULL(map))
- continue;
-
- max77686->irq_masks_cache[i] = max77686->irq_masks_cur[i];
-
- regmap_write(map, max77686_mask_reg[i],
- max77686->irq_masks_cur[i]);
- }
-
- mutex_unlock(&max77686->irqlock);
-}
-
-static const inline struct max77686_irq_data *to_max77686_irq(int irq)
-{
- struct irq_data *data = irq_get_irq_data(irq);
- return &max77686_irqs[data->hwirq];
-}
-
-static void max77686_irq_mask(struct irq_data *data)
-{
- struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
- const struct max77686_irq_data *irq_data = to_max77686_irq(data->irq);
-
- max77686->irq_masks_cur[irq_data->group] |= irq_data->mask;
-
- if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
- pr_info("%s: group=%d, cur=0x%x\n",
- __func__, irq_data->group,
- max77686->irq_masks_cur[irq_data->group]);
-}
-
-static void max77686_irq_unmask(struct irq_data *data)
-{
- struct max77686_dev *max77686 = irq_get_chip_data(data->irq);
- const struct max77686_irq_data *irq_data = to_max77686_irq(data->irq);
-
- max77686->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
-
- if (debug_mask & MAX77686_DEBUG_IRQ_MASK)
- pr_info("%s: group=%d, cur=0x%x\n",
- __func__, irq_data->group,
- max77686->irq_masks_cur[irq_data->group]);
-}
-
-static struct irq_chip max77686_irq_chip = {
- .name = "max77686",
- .irq_bus_lock = max77686_irq_lock,
- .irq_bus_sync_unlock = max77686_irq_sync_unlock,
- .irq_mask = max77686_irq_mask,
- .irq_unmask = max77686_irq_unmask,
-};
-
-static irqreturn_t max77686_irq_thread(int irq, void *data)
-{
- struct max77686_dev *max77686 = data;
- unsigned int irq_reg[MAX77686_IRQ_GROUP_NR] = {};
- unsigned int irq_src;
- int ret;
- int i, cur_irq;
-
- ret = regmap_read(max77686->regmap, MAX77686_REG_INTSRC, &irq_src);
- if (ret < 0) {
- dev_err(max77686->dev, "Failed to read interrupt source: %d\n",
- ret);
- return IRQ_NONE;
- }
-
- if (debug_mask & MAX77686_DEBUG_IRQ_INT)
- pr_info("%s: irq_src=0x%x\n", __func__, irq_src);
-
- if (irq_src == MAX77686_IRQSRC_PMIC) {
- ret = regmap_bulk_read(max77686->regmap,
- MAX77686_REG_INT1, irq_reg, 2);
- if (ret < 0) {
- dev_err(max77686->dev, "Failed to read interrupt source: %d\n",
- ret);
- return IRQ_NONE;
- }
-
- if (debug_mask & MAX77686_DEBUG_IRQ_INT)
- pr_info("%s: int1=0x%x, int2=0x%x\n", __func__,
- irq_reg[PMIC_INT1], irq_reg[PMIC_INT2]);
- }
-
- if (irq_src & MAX77686_IRQSRC_RTC) {
- ret = regmap_read(max77686->rtc_regmap,
- MAX77686_RTC_INT, &irq_reg[RTC_INT]);
- if (ret < 0) {
- dev_err(max77686->dev, "Failed to read interrupt source: %d\n",
- ret);
- return IRQ_NONE;
- }
-
- if (debug_mask & MAX77686_DEBUG_IRQ_INT)
- pr_info("%s: rtc int=0x%x\n", __func__,
- irq_reg[RTC_INT]);
-
- }
-
- for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++)
- irq_reg[i] &= ~max77686->irq_masks_cur[i];
-
- for (i = 0; i < MAX77686_IRQ_NR; i++) {
- if (irq_reg[max77686_irqs[i].group] & max77686_irqs[i].mask) {
- cur_irq = irq_find_mapping(max77686->irq_domain, i);
- if (cur_irq)
- handle_nested_irq(cur_irq);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-static int max77686_irq_domain_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hw)
-{
- struct max77686_dev *max77686 = d->host_data;
-
- irq_set_chip_data(irq, max77686);
- irq_set_chip_and_handler(irq, &max77686_irq_chip, handle_edge_irq);
- irq_set_nested_thread(irq, 1);
-#ifdef CONFIG_ARM
- set_irq_flags(irq, IRQF_VALID);
-#else
- irq_set_noprobe(irq);
-#endif
- return 0;
-}
-
-static struct irq_domain_ops max77686_irq_domain_ops = {
- .map = max77686_irq_domain_map,
-};
-
-int max77686_irq_init(struct max77686_dev *max77686)
-{
- struct irq_domain *domain;
- int i;
- int ret;
- int val;
- struct regmap *map;
-
- mutex_init(&max77686->irqlock);
-
- if (max77686->irq_gpio && !max77686->irq) {
- max77686->irq = gpio_to_irq(max77686->irq_gpio);
-
- if (debug_mask & MAX77686_DEBUG_IRQ_INT) {
- ret = gpio_request(max77686->irq_gpio, "pmic_irq");
- if (ret < 0) {
- dev_err(max77686->dev,
- "Failed to request gpio %d with ret:"
- "%d\n", max77686->irq_gpio, ret);
- return IRQ_NONE;
- }
-
- gpio_direction_input(max77686->irq_gpio);
- val = gpio_get_value(max77686->irq_gpio);
- gpio_free(max77686->irq_gpio);
- pr_info("%s: gpio_irq=%x\n", __func__, val);
- }
- }
-
- if (!max77686->irq) {
- dev_err(max77686->dev, "irq is not specified\n");
- return -ENODEV;
- }
-
- /* Mask individual interrupt sources */
- for (i = 0; i < MAX77686_IRQ_GROUP_NR; i++) {
- max77686->irq_masks_cur[i] = 0xff;
- max77686->irq_masks_cache[i] = 0xff;
- map = max77686_get_regmap(max77686, i);
-
- if (IS_ERR_OR_NULL(map))
- continue;
- if (max77686_mask_reg[i] == MAX77686_REG_INVALID)
- continue;
-
- regmap_write(map, max77686_mask_reg[i], 0xff);
- }
- domain = irq_domain_add_linear(NULL, MAX77686_IRQ_NR,
- &max77686_irq_domain_ops, max77686);
- if (!domain) {
- dev_err(max77686->dev, "could not create irq domain\n");
- return -ENODEV;
- }
- max77686->irq_domain = domain;
-
- ret = request_threaded_irq(max77686->irq, NULL, max77686_irq_thread,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "max77686-irq", max77686);
-
- if (ret)
- dev_err(max77686->dev, "Failed to request IRQ %d: %d\n",
- max77686->irq, ret);
-
-
- if (debug_mask & MAX77686_DEBUG_IRQ_INFO)
- pr_info("%s-\n", __func__);
-
- return 0;
-}
-
-void max77686_irq_exit(struct max77686_dev *max77686)
-{
- if (max77686->irq)
- free_irq(max77686->irq, max77686);
-}
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index ce869acf27ae..86e552348db4 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -1,5 +1,5 @@
/*
- * max77686.c - mfd core driver for the Maxim 77686
+ * max77686.c - mfd core driver for the Maxim 77686/802
*
* Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@smasung.com>
@@ -25,6 +25,8 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/mfd/core.h>
@@ -41,15 +43,166 @@ static const struct mfd_cell max77686_devs[] = {
{ .name = "max77686-clk", },
};
+static const struct mfd_cell max77802_devs[] = {
+ { .name = "max77802-pmic", },
+ { .name = "max77802-clk", },
+ { .name = "max77802-rtc", },
+};
+
+static bool max77802_pmic_is_accessible_reg(struct device *dev,
+ unsigned int reg)
+{
+ return (reg >= MAX77802_REG_DEVICE_ID && reg < MAX77802_REG_PMIC_END);
+}
+
+static bool max77802_rtc_is_accessible_reg(struct device *dev,
+ unsigned int reg)
+{
+ return (reg >= MAX77802_RTC_INT && reg < MAX77802_RTC_END);
+}
+
+static bool max77802_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+ return (max77802_pmic_is_accessible_reg(dev, reg) ||
+ max77802_rtc_is_accessible_reg(dev, reg));
+}
+
+static bool max77802_pmic_is_precious_reg(struct device *dev, unsigned int reg)
+{
+ return (reg == MAX77802_REG_INTSRC || reg == MAX77802_REG_INT1 ||
+ reg == MAX77802_REG_INT2);
+}
+
+static bool max77802_rtc_is_precious_reg(struct device *dev, unsigned int reg)
+{
+ return (reg == MAX77802_RTC_INT ||
+ reg == MAX77802_RTC_UPDATE0 ||
+ reg == MAX77802_RTC_UPDATE1);
+}
+
+static bool max77802_is_precious_reg(struct device *dev, unsigned int reg)
+{
+ return (max77802_pmic_is_precious_reg(dev, reg) ||
+ max77802_rtc_is_precious_reg(dev, reg));
+}
+
+static bool max77802_pmic_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return (max77802_is_precious_reg(dev, reg) ||
+ reg == MAX77802_REG_STATUS1 || reg == MAX77802_REG_STATUS2 ||
+ reg == MAX77802_REG_PWRON);
+}
+
+static bool max77802_rtc_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return (max77802_rtc_is_precious_reg(dev, reg) ||
+ reg == MAX77802_RTC_SEC ||
+ reg == MAX77802_RTC_MIN ||
+ reg == MAX77802_RTC_HOUR ||
+ reg == MAX77802_RTC_WEEKDAY ||
+ reg == MAX77802_RTC_MONTH ||
+ reg == MAX77802_RTC_YEAR ||
+ reg == MAX77802_RTC_DATE);
+}
+
+static bool max77802_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return (max77802_pmic_is_volatile_reg(dev, reg) ||
+ max77802_rtc_is_volatile_reg(dev, reg));
+}
+
static struct regmap_config max77686_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
-#ifdef CONFIG_OF
+static struct regmap_config max77686_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static struct regmap_config max77802_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = max77802_is_accessible_reg,
+ .readable_reg = max77802_is_accessible_reg,
+ .precious_reg = max77802_is_precious_reg,
+ .volatile_reg = max77802_is_volatile_reg,
+ .name = "max77802-pmic",
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regmap_irq max77686_irqs[] = {
+ /* INT1 interrupts */
+ { .reg_offset = 0, .mask = MAX77686_INT1_PWRONF_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_INT1_PWRONR_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_INT1_JIGONBF_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_INT1_JIGONBR_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_INT1_ACOKBF_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_INT1_ACOKBR_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_INT1_ONKEY1S_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_INT1_MRSTB_MSK, },
+ /* INT2 interrupts */
+ { .reg_offset = 1, .mask = MAX77686_INT2_140C_MSK, },
+ { .reg_offset = 1, .mask = MAX77686_INT2_120C_MSK, },
+};
+
+static const struct regmap_irq_chip max77686_irq_chip = {
+ .name = "max77686-pmic",
+ .status_base = MAX77686_REG_INT1,
+ .mask_base = MAX77686_REG_INT1MSK,
+ .num_regs = 2,
+ .irqs = max77686_irqs,
+ .num_irqs = ARRAY_SIZE(max77686_irqs),
+};
+
+static const struct regmap_irq max77686_rtc_irqs[] = {
+ /* RTC interrupts */
+ { .reg_offset = 0, .mask = MAX77686_RTCINT_RTC60S_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA1_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_RTCINT_RTCA2_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_RTCINT_SMPL_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_RTCINT_RTC1S_MSK, },
+ { .reg_offset = 0, .mask = MAX77686_RTCINT_WTSR_MSK, },
+};
+
+static const struct regmap_irq_chip max77686_rtc_irq_chip = {
+ .name = "max77686-rtc",
+ .status_base = MAX77686_RTC_INT,
+ .mask_base = MAX77686_RTC_INTM,
+ .num_regs = 1,
+ .irqs = max77686_rtc_irqs,
+ .num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
+};
+
+static const struct regmap_irq_chip max77802_irq_chip = {
+ .name = "max77802-pmic",
+ .status_base = MAX77802_REG_INT1,
+ .mask_base = MAX77802_REG_INT1MSK,
+ .num_regs = 2,
+ .irqs = max77686_irqs, /* same masks as 77686 */
+ .num_irqs = ARRAY_SIZE(max77686_irqs),
+};
+
+static const struct regmap_irq_chip max77802_rtc_irq_chip = {
+ .name = "max77802-rtc",
+ .status_base = MAX77802_RTC_INT,
+ .mask_base = MAX77802_RTC_INTM,
+ .num_regs = 1,
+ .irqs = max77686_rtc_irqs, /* same masks as 77686 */
+ .num_irqs = ARRAY_SIZE(max77686_rtc_irqs),
+};
+
static const struct of_device_id max77686_pmic_dt_match[] = {
- {.compatible = "maxim,max77686", .data = NULL},
- {},
+ {
+ .compatible = "maxim,max77686",
+ .data = (void *)TYPE_MAX77686,
+ },
+ {
+ .compatible = "maxim,max77802",
+ .data = (void *)TYPE_MAX77802,
+ },
+ { },
};
static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device
@@ -58,53 +211,74 @@ static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device
struct max77686_platform_data *pd;
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- dev_err(dev, "could not allocate memory for pdata\n");
+ if (!pd)
return NULL;
- }
dev->platform_data = pd;
return pd;
}
-#else
-static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device
- *dev)
-{
- return 0;
-}
-#endif
static int max77686_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max77686_dev *max77686 = NULL;
struct max77686_platform_data *pdata = dev_get_platdata(&i2c->dev);
+ const struct of_device_id *match;
unsigned int data;
int ret = 0;
+ const struct regmap_config *config;
+ const struct regmap_irq_chip *irq_chip;
+ const struct regmap_irq_chip *rtc_irq_chip;
+ struct regmap **rtc_regmap;
+ const struct mfd_cell *cells;
+ int n_devs;
- if (i2c->dev.of_node)
+ if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node && !pdata)
pdata = max77686_i2c_parse_dt_pdata(&i2c->dev);
if (!pdata) {
dev_err(&i2c->dev, "No platform data found.\n");
- return -EIO;
+ return -EINVAL;
}
max77686 = devm_kzalloc(&i2c->dev,
sizeof(struct max77686_dev), GFP_KERNEL);
- if (max77686 == NULL)
+ if (!max77686)
return -ENOMEM;
+ if (i2c->dev.of_node) {
+ match = of_match_node(max77686_pmic_dt_match, i2c->dev.of_node);
+ if (!match)
+ return -EINVAL;
+
+ max77686->type = (unsigned long)match->data;
+ } else
+ max77686->type = id->driver_data;
+
i2c_set_clientdata(i2c, max77686);
max77686->dev = &i2c->dev;
max77686->i2c = i2c;
- max77686->type = id->driver_data;
max77686->wakeup = pdata->wakeup;
- max77686->irq_gpio = pdata->irq_gpio;
max77686->irq = i2c->irq;
- max77686->regmap = devm_regmap_init_i2c(i2c, &max77686_regmap_config);
+ if (max77686->type == TYPE_MAX77686) {
+ config = &max77686_regmap_config;
+ irq_chip = &max77686_irq_chip;
+ rtc_irq_chip = &max77686_rtc_irq_chip;
+ rtc_regmap = &max77686->rtc_regmap;
+ cells = max77686_devs;
+ n_devs = ARRAY_SIZE(max77686_devs);
+ } else {
+ config = &max77802_regmap_config;
+ irq_chip = &max77802_irq_chip;
+ rtc_irq_chip = &max77802_rtc_irq_chip;
+ rtc_regmap = &max77686->regmap;
+ cells = max77802_devs;
+ n_devs = ARRAY_SIZE(max77802_devs);
+ }
+
+ max77686->regmap = devm_regmap_init_i2c(i2c, config);
if (IS_ERR(max77686->regmap)) {
ret = PTR_ERR(max77686->regmap);
dev_err(max77686->dev, "Failed to allocate register map: %d\n",
@@ -112,30 +286,68 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
return ret;
}
- if (regmap_read(max77686->regmap,
- MAX77686_REG_DEVICE_ID, &data) < 0) {
+ ret = regmap_read(max77686->regmap, MAX77686_REG_DEVICE_ID, &data);
+ if (ret < 0) {
dev_err(max77686->dev,
"device not found on this channel (this is not an error)\n");
return -ENODEV;
- } else
- dev_info(max77686->dev, "device found\n");
+ }
- max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
- if (!max77686->rtc) {
- dev_err(max77686->dev, "Failed to allocate I2C device for RTC\n");
- return -ENODEV;
+ if (max77686->type == TYPE_MAX77686) {
+ max77686->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
+ if (!max77686->rtc) {
+ dev_err(max77686->dev,
+ "Failed to allocate I2C device for RTC\n");
+ return -ENODEV;
+ }
+ i2c_set_clientdata(max77686->rtc, max77686);
+
+ max77686->rtc_regmap =
+ devm_regmap_init_i2c(max77686->rtc,
+ &max77686_rtc_regmap_config);
+ if (IS_ERR(max77686->rtc_regmap)) {
+ ret = PTR_ERR(max77686->rtc_regmap);
+ dev_err(max77686->dev,
+ "failed to allocate RTC regmap: %d\n",
+ ret);
+ goto err_unregister_i2c;
+ }
+ }
+
+ ret = regmap_add_irq_chip(max77686->regmap, max77686->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_SHARED, 0, irq_chip,
+ &max77686->irq_data);
+ if (ret) {
+ dev_err(&i2c->dev, "failed to add PMIC irq chip: %d\n", ret);
+ goto err_unregister_i2c;
}
- i2c_set_clientdata(max77686->rtc, max77686);
- max77686_irq_init(max77686);
+ ret = regmap_add_irq_chip(*rtc_regmap, max77686->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
+ IRQF_SHARED, 0, rtc_irq_chip,
+ &max77686->rtc_irq_data);
+ if (ret) {
+ dev_err(&i2c->dev, "failed to add RTC irq chip: %d\n", ret);
+ goto err_del_irqc;
+ }
- ret = mfd_add_devices(max77686->dev, -1, max77686_devs,
- ARRAY_SIZE(max77686_devs), NULL, 0, NULL);
+ ret = mfd_add_devices(max77686->dev, -1, cells, n_devs, NULL, 0, NULL);
if (ret < 0) {
- mfd_remove_devices(max77686->dev);
- i2c_unregister_device(max77686->rtc);
+ dev_err(&i2c->dev, "failed to add MFD devices: %d\n", ret);
+ goto err_del_rtc_irqc;
}
+ return 0;
+
+err_del_rtc_irqc:
+ regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data);
+err_del_irqc:
+ regmap_del_irq_chip(max77686->irq, max77686->irq_data);
+err_unregister_i2c:
+ if (max77686->type == TYPE_MAX77686)
+ i2c_unregister_device(max77686->rtc);
+
return ret;
}
@@ -144,7 +356,12 @@ static int max77686_i2c_remove(struct i2c_client *i2c)
struct max77686_dev *max77686 = i2c_get_clientdata(i2c);
mfd_remove_devices(max77686->dev);
- i2c_unregister_device(max77686->rtc);
+
+ regmap_del_irq_chip(max77686->irq, max77686->rtc_irq_data);
+ regmap_del_irq_chip(max77686->irq, max77686->irq_data);
+
+ if (max77686->type == TYPE_MAX77686)
+ i2c_unregister_device(max77686->rtc);
return 0;
}
@@ -155,10 +372,50 @@ static const struct i2c_device_id max77686_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max77686_i2c_id);
+#ifdef CONFIG_PM_SLEEP
+static int max77686_suspend(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max77686_dev *max77686 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(max77686->irq);
+
+ /*
+ * The IRQ must be disabled during suspend because if it fires
+ * while suspended it will be handled before I2C has resumed.
+ *
+ * When the device is woken up from suspend (e.g. by an RTC wake alarm),
+ * an interrupt can occur before the I2C bus controller has resumed.
+ * The interrupt handler would then try to read registers, and that read
+ * would fail because I2C is still suspended.
+ */
+ disable_irq(max77686->irq);
+
+ return 0;
+}
+
+static int max77686_resume(struct device *dev)
+{
+ struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
+ struct max77686_dev *max77686 = i2c_get_clientdata(i2c);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(max77686->irq);
+
+ enable_irq(max77686->irq);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(max77686_pm, max77686_suspend, max77686_resume);
+
static struct i2c_driver max77686_i2c_driver = {
.driver = {
.name = "max77686",
.owner = THIS_MODULE,
+ .pm = &max77686_pm,
.of_match_table = of_match_ptr(max77686_pmic_dt_match),
},
.probe = max77686_i2c_probe,
@@ -179,6 +436,6 @@ static void __exit max77686_i2c_exit(void)
}
module_exit(max77686_i2c_exit);
-MODULE_DESCRIPTION("MAXIM 77686 multi-function core driver");
+MODULE_DESCRIPTION("MAXIM 77686/802 multi-function core driver");
MODULE_AUTHOR("Chiwoong Byun <woong.byun@samsung.com>");
MODULE_LICENSE("GPL");
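
A noteworthy pattern in the max77686.c rework above is how the MAX77802 regmap callbacks are built by composing smaller range predicates (accessible = PMIC range or RTC range, and so on). The sketch below repeats that composition in plain C with made-up register bounds; the real MAX77802 limits come from the driver headers and are not reproduced here.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder bounds, not the real MAX77802 register map. */
#define PMIC_FIRST	0x02
#define PMIC_END	0x60
#define RTC_FIRST	0xc0
#define RTC_END		0xf0

static bool pmic_reg_ok(unsigned int reg)
{
	return reg >= PMIC_FIRST && reg < PMIC_END;
}

static bool rtc_reg_ok(unsigned int reg)
{
	return reg >= RTC_FIRST && reg < RTC_END;
}

/* Combined predicate, as in max77802_is_accessible_reg() above. */
static bool reg_accessible(unsigned int reg)
{
	return pmic_reg_ok(reg) || rtc_reg_ok(reg);
}

int main(void)
{
	printf("0x80 accessible: %d\n", reg_accessible(0x80));
	return 0;
}
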
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index f3faf0c45ddd..97a787ab3d51 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -624,6 +624,7 @@ static void max8925_irq_sync_unlock(struct irq_data *data)
static void max8925_irq_enable(struct irq_data *data)
{
struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
+
max8925_irqs[data->irq - chip->irq_base].enable
= max8925_irqs[data->irq - chip->irq_base].offs;
}
@@ -631,6 +632,7 @@ static void max8925_irq_enable(struct irq_data *data)
static void max8925_irq_disable(struct irq_data *data)
{
struct max8925_chip *chip = irq_data_get_irq_chip_data(data);
+
max8925_irqs[data->irq - chip->irq_base].enable = 0;
}
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index a83eed5c15ca..ecbe78ead3b6 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -257,9 +257,11 @@ static struct i2c_driver max8925_driver = {
static int __init max8925_i2c_init(void)
{
int ret;
+
ret = i2c_add_driver(&max8925_driver);
if (ret != 0)
pr_err("Failed to register MAX8925 I2C driver: %d\n", ret);
+
return ret;
}
subsys_initcall(max8925_i2c_init);
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index acf5dd712eb2..2b6bc868cd3d 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -10,106 +10,18 @@
* Free Software Foundation.
*/
-#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/mc13xxx.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
#include "mc13xxx.h"
#define MC13XXX_IRQSTAT0 0
-#define MC13XXX_IRQSTAT0_ADCDONEI (1 << 0)
-#define MC13XXX_IRQSTAT0_ADCBISDONEI (1 << 1)
-#define MC13XXX_IRQSTAT0_TSI (1 << 2)
-#define MC13783_IRQSTAT0_WHIGHI (1 << 3)
-#define MC13783_IRQSTAT0_WLOWI (1 << 4)
-#define MC13XXX_IRQSTAT0_CHGDETI (1 << 6)
-#define MC13783_IRQSTAT0_CHGOVI (1 << 7)
-#define MC13XXX_IRQSTAT0_CHGREVI (1 << 8)
-#define MC13XXX_IRQSTAT0_CHGSHORTI (1 << 9)
-#define MC13XXX_IRQSTAT0_CCCVI (1 << 10)
-#define MC13XXX_IRQSTAT0_CHGCURRI (1 << 11)
-#define MC13XXX_IRQSTAT0_BPONI (1 << 12)
-#define MC13XXX_IRQSTAT0_LOBATLI (1 << 13)
-#define MC13XXX_IRQSTAT0_LOBATHI (1 << 14)
-#define MC13783_IRQSTAT0_UDPI (1 << 15)
-#define MC13783_IRQSTAT0_USBI (1 << 16)
-#define MC13783_IRQSTAT0_IDI (1 << 19)
-#define MC13783_IRQSTAT0_SE1I (1 << 21)
-#define MC13783_IRQSTAT0_CKDETI (1 << 22)
-#define MC13783_IRQSTAT0_UDMI (1 << 23)
-
#define MC13XXX_IRQMASK0 1
-#define MC13XXX_IRQMASK0_ADCDONEM MC13XXX_IRQSTAT0_ADCDONEI
-#define MC13XXX_IRQMASK0_ADCBISDONEM MC13XXX_IRQSTAT0_ADCBISDONEI
-#define MC13XXX_IRQMASK0_TSM MC13XXX_IRQSTAT0_TSI
-#define MC13783_IRQMASK0_WHIGHM MC13783_IRQSTAT0_WHIGHI
-#define MC13783_IRQMASK0_WLOWM MC13783_IRQSTAT0_WLOWI
-#define MC13XXX_IRQMASK0_CHGDETM MC13XXX_IRQSTAT0_CHGDETI
-#define MC13783_IRQMASK0_CHGOVM MC13783_IRQSTAT0_CHGOVI
-#define MC13XXX_IRQMASK0_CHGREVM MC13XXX_IRQSTAT0_CHGREVI
-#define MC13XXX_IRQMASK0_CHGSHORTM MC13XXX_IRQSTAT0_CHGSHORTI
-#define MC13XXX_IRQMASK0_CCCVM MC13XXX_IRQSTAT0_CCCVI
-#define MC13XXX_IRQMASK0_CHGCURRM MC13XXX_IRQSTAT0_CHGCURRI
-#define MC13XXX_IRQMASK0_BPONM MC13XXX_IRQSTAT0_BPONI
-#define MC13XXX_IRQMASK0_LOBATLM MC13XXX_IRQSTAT0_LOBATLI
-#define MC13XXX_IRQMASK0_LOBATHM MC13XXX_IRQSTAT0_LOBATHI
-#define MC13783_IRQMASK0_UDPM MC13783_IRQSTAT0_UDPI
-#define MC13783_IRQMASK0_USBM MC13783_IRQSTAT0_USBI
-#define MC13783_IRQMASK0_IDM MC13783_IRQSTAT0_IDI
-#define MC13783_IRQMASK0_SE1M MC13783_IRQSTAT0_SE1I
-#define MC13783_IRQMASK0_CKDETM MC13783_IRQSTAT0_CKDETI
-#define MC13783_IRQMASK0_UDMM MC13783_IRQSTAT0_UDMI
-
#define MC13XXX_IRQSTAT1 3
-#define MC13XXX_IRQSTAT1_1HZI (1 << 0)
-#define MC13XXX_IRQSTAT1_TODAI (1 << 1)
-#define MC13783_IRQSTAT1_ONOFD1I (1 << 3)
-#define MC13783_IRQSTAT1_ONOFD2I (1 << 4)
-#define MC13783_IRQSTAT1_ONOFD3I (1 << 5)
-#define MC13XXX_IRQSTAT1_SYSRSTI (1 << 6)
-#define MC13XXX_IRQSTAT1_RTCRSTI (1 << 7)
-#define MC13XXX_IRQSTAT1_PCI (1 << 8)
-#define MC13XXX_IRQSTAT1_WARMI (1 << 9)
-#define MC13XXX_IRQSTAT1_MEMHLDI (1 << 10)
-#define MC13783_IRQSTAT1_PWRRDYI (1 << 11)
-#define MC13XXX_IRQSTAT1_THWARNLI (1 << 12)
-#define MC13XXX_IRQSTAT1_THWARNHI (1 << 13)
-#define MC13XXX_IRQSTAT1_CLKI (1 << 14)
-#define MC13783_IRQSTAT1_SEMAFI (1 << 15)
-#define MC13783_IRQSTAT1_MC2BI (1 << 17)
-#define MC13783_IRQSTAT1_HSDETI (1 << 18)
-#define MC13783_IRQSTAT1_HSLI (1 << 19)
-#define MC13783_IRQSTAT1_ALSPTHI (1 << 20)
-#define MC13783_IRQSTAT1_AHSSHORTI (1 << 21)
-
#define MC13XXX_IRQMASK1 4
-#define MC13XXX_IRQMASK1_1HZM MC13XXX_IRQSTAT1_1HZI
-#define MC13XXX_IRQMASK1_TODAM MC13XXX_IRQSTAT1_TODAI
-#define MC13783_IRQMASK1_ONOFD1M MC13783_IRQSTAT1_ONOFD1I
-#define MC13783_IRQMASK1_ONOFD2M MC13783_IRQSTAT1_ONOFD2I
-#define MC13783_IRQMASK1_ONOFD3M MC13783_IRQSTAT1_ONOFD3I
-#define MC13XXX_IRQMASK1_SYSRSTM MC13XXX_IRQSTAT1_SYSRSTI
-#define MC13XXX_IRQMASK1_RTCRSTM MC13XXX_IRQSTAT1_RTCRSTI
-#define MC13XXX_IRQMASK1_PCM MC13XXX_IRQSTAT1_PCI
-#define MC13XXX_IRQMASK1_WARMM MC13XXX_IRQSTAT1_WARMI
-#define MC13XXX_IRQMASK1_MEMHLDM MC13XXX_IRQSTAT1_MEMHLDI
-#define MC13783_IRQMASK1_PWRRDYM MC13783_IRQSTAT1_PWRRDYI
-#define MC13XXX_IRQMASK1_THWARNLM MC13XXX_IRQSTAT1_THWARNLI
-#define MC13XXX_IRQMASK1_THWARNHM MC13XXX_IRQSTAT1_THWARNHI
-#define MC13XXX_IRQMASK1_CLKM MC13XXX_IRQSTAT1_CLKI
-#define MC13783_IRQMASK1_SEMAFM MC13783_IRQSTAT1_SEMAFI
-#define MC13783_IRQMASK1_MC2BM MC13783_IRQSTAT1_MC2BI
-#define MC13783_IRQMASK1_HSDETM MC13783_IRQSTAT1_HSDETI
-#define MC13783_IRQMASK1_HSLM MC13783_IRQSTAT1_HSLI
-#define MC13783_IRQMASK1_ALSPTHM MC13783_IRQSTAT1_ALSPTHI
-#define MC13783_IRQMASK1_AHSSHORTM MC13783_IRQSTAT1_AHSSHORTI
#define MC13XXX_REVISION 7
#define MC13XXX_REVISION_REVMETAL (0x07 << 0)
@@ -189,45 +101,21 @@ EXPORT_SYMBOL(mc13xxx_reg_rmw);
int mc13xxx_irq_mask(struct mc13xxx *mc13xxx, int irq)
{
- int ret;
- unsigned int offmask = irq < 24 ? MC13XXX_IRQMASK0 : MC13XXX_IRQMASK1;
- u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
- u32 mask;
-
- if (irq < 0 || irq >= MC13XXX_NUM_IRQ)
- return -EINVAL;
-
- ret = mc13xxx_reg_read(mc13xxx, offmask, &mask);
- if (ret)
- return ret;
+ int virq = regmap_irq_get_virq(mc13xxx->irq_data, irq);
- if (mask & irqbit)
- /* already masked */
- return 0;
+ disable_irq_nosync(virq);
- return mc13xxx_reg_write(mc13xxx, offmask, mask | irqbit);
+ return 0;
}
EXPORT_SYMBOL(mc13xxx_irq_mask);
int mc13xxx_irq_unmask(struct mc13xxx *mc13xxx, int irq)
{
- int ret;
- unsigned int offmask = irq < 24 ? MC13XXX_IRQMASK0 : MC13XXX_IRQMASK1;
- u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
- u32 mask;
-
- if (irq < 0 || irq >= MC13XXX_NUM_IRQ)
- return -EINVAL;
+ int virq = regmap_irq_get_virq(mc13xxx->irq_data, irq);
- ret = mc13xxx_reg_read(mc13xxx, offmask, &mask);
- if (ret)
- return ret;
+ enable_irq(virq);
- if (!(mask & irqbit))
- /* already unmasked */
- return 0;
-
- return mc13xxx_reg_write(mc13xxx, offmask, mask & ~irqbit);
+ return 0;
}
EXPORT_SYMBOL(mc13xxx_irq_unmask);
@@ -239,7 +127,7 @@ int mc13xxx_irq_status(struct mc13xxx *mc13xxx, int irq,
unsigned int offstat = irq < 24 ? MC13XXX_IRQSTAT0 : MC13XXX_IRQSTAT1;
u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
- if (irq < 0 || irq >= MC13XXX_NUM_IRQ)
+ if (irq < 0 || irq >= ARRAY_SIZE(mc13xxx->irqs))
return -EINVAL;
if (enabled) {
@@ -266,147 +154,26 @@ int mc13xxx_irq_status(struct mc13xxx *mc13xxx, int irq,
}
EXPORT_SYMBOL(mc13xxx_irq_status);
-int mc13xxx_irq_ack(struct mc13xxx *mc13xxx, int irq)
-{
- unsigned int offstat = irq < 24 ? MC13XXX_IRQSTAT0 : MC13XXX_IRQSTAT1;
- unsigned int val = 1 << (irq < 24 ? irq : irq - 24);
-
- BUG_ON(irq < 0 || irq >= MC13XXX_NUM_IRQ);
-
- return mc13xxx_reg_write(mc13xxx, offstat, val);
-}
-EXPORT_SYMBOL(mc13xxx_irq_ack);
-
-int mc13xxx_irq_request_nounmask(struct mc13xxx *mc13xxx, int irq,
- irq_handler_t handler, const char *name, void *dev)
-{
- BUG_ON(!mutex_is_locked(&mc13xxx->lock));
- BUG_ON(!handler);
-
- if (irq < 0 || irq >= MC13XXX_NUM_IRQ)
- return -EINVAL;
-
- if (mc13xxx->irqhandler[irq])
- return -EBUSY;
-
- mc13xxx->irqhandler[irq] = handler;
- mc13xxx->irqdata[irq] = dev;
-
- return 0;
-}
-EXPORT_SYMBOL(mc13xxx_irq_request_nounmask);
-
int mc13xxx_irq_request(struct mc13xxx *mc13xxx, int irq,
irq_handler_t handler, const char *name, void *dev)
{
- int ret;
+ int virq = regmap_irq_get_virq(mc13xxx->irq_data, irq);
- ret = mc13xxx_irq_request_nounmask(mc13xxx, irq, handler, name, dev);
- if (ret)
- return ret;
-
- ret = mc13xxx_irq_unmask(mc13xxx, irq);
- if (ret) {
- mc13xxx->irqhandler[irq] = NULL;
- mc13xxx->irqdata[irq] = NULL;
- return ret;
- }
-
- return 0;
+ return devm_request_threaded_irq(mc13xxx->dev, virq, NULL, handler,
+ 0, name, dev);
}
EXPORT_SYMBOL(mc13xxx_irq_request);
int mc13xxx_irq_free(struct mc13xxx *mc13xxx, int irq, void *dev)
{
- int ret;
- BUG_ON(!mutex_is_locked(&mc13xxx->lock));
+ int virq = regmap_irq_get_virq(mc13xxx->irq_data, irq);
- if (irq < 0 || irq >= MC13XXX_NUM_IRQ || !mc13xxx->irqhandler[irq] ||
- mc13xxx->irqdata[irq] != dev)
- return -EINVAL;
-
- ret = mc13xxx_irq_mask(mc13xxx, irq);
- if (ret)
- return ret;
-
- mc13xxx->irqhandler[irq] = NULL;
- mc13xxx->irqdata[irq] = NULL;
+ devm_free_irq(mc13xxx->dev, virq, dev);
return 0;
}
EXPORT_SYMBOL(mc13xxx_irq_free);
-static inline irqreturn_t mc13xxx_irqhandler(struct mc13xxx *mc13xxx, int irq)
-{
- return mc13xxx->irqhandler[irq](irq, mc13xxx->irqdata[irq]);
-}
-
-/*
- * returns: number of handled irqs or negative error
- * locking: holds mc13xxx->lock
- */
-static int mc13xxx_irq_handle(struct mc13xxx *mc13xxx,
- unsigned int offstat, unsigned int offmask, int baseirq)
-{
- u32 stat, mask;
- int ret = mc13xxx_reg_read(mc13xxx, offstat, &stat);
- int num_handled = 0;
-
- if (ret)
- return ret;
-
- ret = mc13xxx_reg_read(mc13xxx, offmask, &mask);
- if (ret)
- return ret;
-
- while (stat & ~mask) {
- int irq = __ffs(stat & ~mask);
-
- stat &= ~(1 << irq);
-
- if (likely(mc13xxx->irqhandler[baseirq + irq])) {
- irqreturn_t handled;
-
- handled = mc13xxx_irqhandler(mc13xxx, baseirq + irq);
- if (handled == IRQ_HANDLED)
- num_handled++;
- } else {
- dev_err(mc13xxx->dev,
- "BUG: irq %u but no handler\n",
- baseirq + irq);
-
- mask |= 1 << irq;
-
- ret = mc13xxx_reg_write(mc13xxx, offmask, mask);
- }
- }
-
- return num_handled;
-}
-
-static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
-{
- struct mc13xxx *mc13xxx = data;
- irqreturn_t ret;
- int handled = 0;
-
- mc13xxx_lock(mc13xxx);
-
- ret = mc13xxx_irq_handle(mc13xxx, MC13XXX_IRQSTAT0,
- MC13XXX_IRQMASK0, 0);
- if (ret > 0)
- handled = 1;
-
- ret = mc13xxx_irq_handle(mc13xxx, MC13XXX_IRQSTAT1,
- MC13XXX_IRQMASK1, 24);
- if (ret > 0)
- handled = 1;
-
- mc13xxx_unlock(mc13xxx);
-
- return IRQ_RETVAL(handled);
-}
-
#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
static void mc13xxx_print_revision(struct mc13xxx *mc13xxx, u32 revision)
{
@@ -475,8 +242,6 @@ static irqreturn_t mc13xxx_handler_adcdone(int irq, void *data)
{
struct mc13xxx_adcdone_data *adcdone_data = data;
- mc13xxx_irq_ack(adcdone_data->mc13xxx, irq);
-
complete_all(&adcdone_data->done);
return IRQ_HANDLED;
@@ -544,7 +309,6 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__);
mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
mc13xxx_handler_adcdone, __func__, &adcdone_data);
- mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE);
mc13xxx_reg_write(mc13xxx, MC13XXX_ADC0, adc0);
mc13xxx_reg_write(mc13xxx, MC13XXX_ADC1, adc1);
@@ -599,7 +363,8 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
if (!cell.name)
return -ENOMEM;
- return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0, NULL);
+ return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0,
+ regmap_irq_get_domain(mc13xxx->irq_data));
}
static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
@@ -640,8 +405,8 @@ int mc13xxx_common_init(struct device *dev)
{
struct mc13xxx_platform_data *pdata = dev_get_platdata(dev);
struct mc13xxx *mc13xxx = dev_get_drvdata(dev);
- int ret;
u32 revision;
+ int i, ret;
mc13xxx->dev = dev;
@@ -651,31 +416,32 @@ int mc13xxx_common_init(struct device *dev)
mc13xxx->variant->print_revision(mc13xxx, revision);
- /* mask all irqs */
- ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK0, 0x00ffffff);
- if (ret)
- return ret;
+ for (i = 0; i < ARRAY_SIZE(mc13xxx->irqs); i++) {
+ mc13xxx->irqs[i].reg_offset = i / MC13XXX_IRQ_PER_REG;
+ mc13xxx->irqs[i].mask = BIT(i % MC13XXX_IRQ_PER_REG);
+ }
- ret = mc13xxx_reg_write(mc13xxx, MC13XXX_IRQMASK1, 0x00ffffff);
+ mc13xxx->irq_chip.name = dev_name(dev);
+ mc13xxx->irq_chip.status_base = MC13XXX_IRQSTAT0;
+ mc13xxx->irq_chip.mask_base = MC13XXX_IRQMASK0;
+ mc13xxx->irq_chip.ack_base = MC13XXX_IRQSTAT0;
+ mc13xxx->irq_chip.irq_reg_stride = MC13XXX_IRQSTAT1 - MC13XXX_IRQSTAT0;
+ mc13xxx->irq_chip.init_ack_masked = true;
+ mc13xxx->irq_chip.use_ack = true;
+ mc13xxx->irq_chip.num_regs = MC13XXX_IRQ_REG_CNT;
+ mc13xxx->irq_chip.irqs = mc13xxx->irqs;
+ mc13xxx->irq_chip.num_irqs = ARRAY_SIZE(mc13xxx->irqs);
+
+ ret = regmap_add_irq_chip(mc13xxx->regmap, mc13xxx->irq, IRQF_ONESHOT,
+ 0, &mc13xxx->irq_chip, &mc13xxx->irq_data);
if (ret)
return ret;
mutex_init(&mc13xxx->lock);
- ret = request_threaded_irq(mc13xxx->irq, NULL, mc13xxx_irq_thread,
- IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx);
- if (ret)
- return ret;
-
if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
mc13xxx->flags = pdata->flags;
- if (mc13xxx->flags & MC13XXX_USE_ADC)
- mc13xxx_add_subdevice(mc13xxx, "%s-adc");
-
- if (mc13xxx->flags & MC13XXX_USE_RTC)
- mc13xxx_add_subdevice(mc13xxx, "%s-rtc");
-
if (pdata) {
mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
&pdata->regulators, sizeof(pdata->regulators));
@@ -699,6 +465,12 @@ int mc13xxx_common_init(struct device *dev)
mc13xxx_add_subdevice(mc13xxx, "%s-ts");
}
+ if (mc13xxx->flags & MC13XXX_USE_ADC)
+ mc13xxx_add_subdevice(mc13xxx, "%s-adc");
+
+ if (mc13xxx->flags & MC13XXX_USE_RTC)
+ mc13xxx_add_subdevice(mc13xxx, "%s-rtc");
+
return 0;
}
EXPORT_SYMBOL_GPL(mc13xxx_common_init);
@@ -707,8 +479,8 @@ int mc13xxx_common_exit(struct device *dev)
{
struct mc13xxx *mc13xxx = dev_get_drvdata(dev);
- free_irq(mc13xxx->irq, mc13xxx);
mfd_remove_devices(dev);
+ regmap_del_irq_chip(mc13xxx->irq, mc13xxx->irq_data);
mutex_destroy(&mc13xxx->lock);
return 0;
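/*
 * Editorial sketch: the mc13xxx IRQ helpers above now just translate a
 * chip-local interrupt number into the virq managed by regmap-irq.  A
 * minimal, hypothetical user of the new mc13xxx_irq_request() might look
 * like this (my_adcdone_handler and the "my-adcdone" name are
 * illustrative, not part of this patch):
 */
#include <linux/interrupt.h>
#include <linux/mfd/mc13xxx.h>

static irqreturn_t my_adcdone_handler(int irq, void *data)
{
	/* regmap-irq has already acknowledged the status bit */
	return IRQ_HANDLED;
}

static int my_request_adcdone(struct mc13xxx *mc13xxx, void *priv)
{
	/* maps MC13XXX_IRQ_ADCDONE to its virq and requests a threaded IRQ */
	return mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
				   my_adcdone_handler, "my-adcdone", priv);
}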
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
index ae7f1659f5d1..33677d1dcf66 100644
--- a/drivers/mfd/mc13xxx.h
+++ b/drivers/mfd/mc13xxx.h
@@ -13,7 +13,9 @@
#include <linux/regmap.h>
#include <linux/mfd/mc13xxx.h>
-#define MC13XXX_NUMREGS 0x3f
+#define MC13XXX_NUMREGS 0x3f
+#define MC13XXX_IRQ_REG_CNT 2
+#define MC13XXX_IRQ_PER_REG 24
struct mc13xxx;
@@ -33,13 +35,14 @@ struct mc13xxx {
struct device *dev;
const struct mc13xxx_variant *variant;
+ struct regmap_irq irqs[MC13XXX_IRQ_PER_REG * MC13XXX_IRQ_REG_CNT];
+ struct regmap_irq_chip irq_chip;
+ struct regmap_irq_chip_data *irq_data;
+
struct mutex lock;
int irq;
int flags;
- irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
- void *irqdata[MC13XXX_NUM_IRQ];
-
int adcflags;
};
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 62e5e3617eb0..7f5066e39752 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -137,6 +137,7 @@ EXPORT_SYMBOL(mcp_reg_read);
void mcp_enable(struct mcp *mcp)
{
unsigned long flags;
+
spin_lock_irqsave(&mcp->lock, flags);
if (mcp->use_count++ == 0)
mcp->ops->enable(mcp);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index b48d80c367f9..33a9234b701c 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -445,7 +445,7 @@ static unsigned omap_usbhs_rev1_hostconfig(struct usbhs_hcd_omap *omap,
for (i = 0; i < omap->nports; i++) {
if (is_ehci_phy_mode(pdata->port_mode[i])) {
- reg &= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
+ reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
break;
}
}
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 41ab5e34d2ac..c87f7a0a53f8 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -244,20 +244,20 @@ static int pcf50633_probe(struct i2c_client *client,
for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
struct platform_device *pdev;
+ int j;
pdev = platform_device_alloc("pcf50633-regulator", i);
- if (!pdev) {
- dev_err(pcf->dev, "Cannot create regulator %d\n", i);
- continue;
- }
+ if (!pdev)
+ return -ENOMEM;
pdev->dev.parent = pcf->dev;
- if (platform_device_add_data(pdev, &pdata->reg_init_data[i],
- sizeof(pdata->reg_init_data[i])) < 0) {
+ ret = platform_device_add_data(pdev, &pdata->reg_init_data[i],
+ sizeof(pdata->reg_init_data[i]));
+ if (ret) {
platform_device_put(pdev);
- dev_err(pcf->dev, "Out of memory for regulator parameters %d\n",
- i);
- continue;
+ for (j = 0; j < i; j++)
+ platform_device_put(pcf->regulator_pdev[j]);
+ return ret;
}
pcf->regulator_pdev[i] = pdev;
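/*
 * Editorial sketch: the hunk above turns "log and continue" into
 * "unwind and fail" when a regulator child cannot be set up.  The
 * generic allocate-all-or-roll-back idiom it follows is shown below
 * with illustrative names ("example-child" is not a real driver):
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_register_children(struct device *parent,
				     struct platform_device **pdevs,
				     int count)
{
	int i, j, ret;

	for (i = 0; i < count; i++) {
		pdevs[i] = platform_device_alloc("example-child", i);
		if (!pdevs[i]) {
			ret = -ENOMEM;
			goto unwind;
		}
		pdevs[i]->dev.parent = parent;

		ret = platform_device_add(pdevs[i]);
		if (ret) {
			/* never added, so drop the reference directly */
			platform_device_put(pdevs[i]);
			goto unwind;
		}
	}
	return 0;

unwind:
	/* tear down everything that was registered before the failure */
	for (j = 0; j < i; j++)
		platform_device_unregister(pdevs[j]);
	return ret;
}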
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 959513803542..39904f77c049 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -186,11 +186,9 @@ static void pm8xxx_irq_mask_ack(struct irq_data *d)
{
struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
unsigned int pmirq = irqd_to_hwirq(d);
- int irq_bit;
u8 block, config;
block = pmirq / 8;
- irq_bit = pmirq % 8;
config = chip->config[pmirq] | PM_IRQF_MASK_ALL | PM_IRQF_CLR;
pm8xxx_config_irq(chip, block, config);
@@ -200,11 +198,9 @@ static void pm8xxx_irq_unmask(struct irq_data *d)
{
struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
unsigned int pmirq = irqd_to_hwirq(d);
- int irq_bit;
u8 block, config;
block = pmirq / 8;
- irq_bit = pmirq % 8;
config = chip->config[pmirq];
pm8xxx_config_irq(chip, block, config);
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index 1d15735f9ef9..d01b8c249231 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -337,40 +337,64 @@ static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read, int timeout)
{
- struct completion trans_done;
- u8 dir;
- int err = 0, i, count;
- long timeleft;
- unsigned long flags;
- struct scatterlist *sg;
- enum dma_data_direction dma_dir;
- u32 val;
- dma_addr_t addr;
- unsigned int len;
+ int err = 0, count;
dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg);
+ count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
+ if (count < 1)
+ return -EINVAL;
+ dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
+
+ err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
+
+ rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
+
+int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int num_sg, bool read)
+{
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- /* don't transfer data during abort processing */
if (pcr->remove_pci)
return -EINVAL;
if ((sglist == NULL) || (num_sg <= 0))
return -EINVAL;
- if (read) {
- dir = DEVICE_TO_HOST;
- dma_dir = DMA_FROM_DEVICE;
- } else {
- dir = HOST_TO_DEVICE;
- dma_dir = DMA_TO_DEVICE;
- }
+ return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
- count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
- if (count < 1) {
- dev_err(&(pcr->pci->dev), "scatterlist map failed\n");
+void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int num_sg, bool read)
+{
+ enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
+}
+EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
+
+int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
+ int count, bool read, int timeout)
+{
+ struct completion trans_done;
+ struct scatterlist *sg;
+ dma_addr_t addr;
+ long timeleft;
+ unsigned long flags;
+ unsigned int len;
+ int i, err = 0;
+ u32 val;
+ u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
+
+ if (pcr->remove_pci)
+ return -ENODEV;
+
+ if ((sglist == NULL) || (count < 1))
return -EINVAL;
- }
- dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count);
val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
pcr->sgi = 0;
@@ -400,12 +424,10 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
}
spin_lock_irqsave(&pcr->lock, flags);
-
if (pcr->trans_result == TRANS_RESULT_FAIL)
err = -EINVAL;
else if (pcr->trans_result == TRANS_NO_DEVICE)
err = -ENODEV;
-
spin_unlock_irqrestore(&pcr->lock, flags);
out:
@@ -413,8 +435,6 @@ out:
pcr->done = NULL;
spin_unlock_irqrestore(&pcr->lock, flags);
- dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir);
-
if ((err < 0) && (err != -ENODEV))
rtsx_pci_stop_cmd(pcr);
@@ -423,7 +443,7 @@ out:
return err;
}
-EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
+EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
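/*
 * Editorial sketch: rtsx_pci_transfer_data() is split above so a card
 * host driver can keep one DMA mapping alive across several transfers.
 * A minimal read-side example under that assumption (the two-transfer
 * loop and the 10000 ms timeout are illustrative only):
 */
#include <linux/errno.h>
#include <linux/mfd/rtsx_pci.h>
#include <linux/scatterlist.h>

static int example_two_reads(struct rtsx_pcr *pcr, struct scatterlist *sg,
			     int num_sg)
{
	int count, err = 0, i;

	count = rtsx_pci_dma_map_sg(pcr, sg, num_sg, true);
	if (count < 1)
		return -EINVAL;

	for (i = 0; i < 2; i++) {
		/* reuse the mapping; only the transfer is repeated */
		err = rtsx_pci_dma_transfer(pcr, sg, count, true, 10000);
		if (err < 0)
			break;
	}

	rtsx_pci_dma_unmap_sg(pcr, sg, num_sg, true);
	return err;
}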
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index be06d0abbf19..dba7e2b6f8e9 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -28,8 +28,10 @@
#include <linux/mfd/samsung/s2mpa01.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps14.h>
+#include <linux/mfd/samsung/s2mpu02.h>
#include <linux/mfd/samsung/s5m8763.h>
#include <linux/mfd/samsung/s5m8767.h>
+#include <linux/regulator/machine.h>
#include <linux/regmap.h>
static const struct mfd_cell s5m8751_devs[] = {
@@ -89,6 +91,15 @@ static const struct mfd_cell s2mpa01_devs[] = {
},
};
+static const struct mfd_cell s2mpu02_devs[] = {
+ { .name = "s2mpu02-pmic", },
+ { .name = "s2mpu02-rtc", },
+ {
+ .name = "s2mpu02-clk",
+ .of_compatible = "samsung,s2mpu02-clk",
+ }
+};
+
#ifdef CONFIG_OF
static const struct of_device_id sec_dt_match[] = {
{ .compatible = "samsung,s5m8767-pmic",
@@ -103,6 +114,9 @@ static const struct of_device_id sec_dt_match[] = {
.compatible = "samsung,s2mpa01-pmic",
.data = (void *)S2MPA01,
}, {
+ .compatible = "samsung,s2mpu02-pmic",
+ .data = (void *)S2MPU02,
+ }, {
/* Sentinel */
},
};
@@ -132,6 +146,18 @@ static bool s2mps11_volatile(struct device *dev, unsigned int reg)
}
}
+static bool s2mpu02_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case S2MPU02_REG_INT1M:
+ case S2MPU02_REG_INT2M:
+ case S2MPU02_REG_INT3M:
+ return false;
+ default:
+ return true;
+ }
+}
+
static bool s5m8763_volatile(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -177,6 +203,15 @@ static const struct regmap_config s2mps14_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
+static const struct regmap_config s2mpu02_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = S2MPU02_REG_DVSDATA,
+ .volatile_reg = s2mpu02_volatile,
+ .cache_type = REGCACHE_FLAT,
+};
+
static const struct regmap_config s5m8763_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -238,6 +273,7 @@ static inline unsigned long sec_i2c_get_driver_data(struct i2c_client *i2c,
#ifdef CONFIG_OF
if (i2c->dev.of_node) {
const struct of_device_id *match;
+
match = of_match_node(sec_dt_match, i2c->dev.of_node);
return (unsigned long)match->data;
}
@@ -250,9 +286,10 @@ static int sec_pmic_probe(struct i2c_client *i2c,
{
struct sec_platform_data *pdata = dev_get_platdata(&i2c->dev);
const struct regmap_config *regmap;
+ const struct mfd_cell *sec_devs;
struct sec_pmic_dev *sec_pmic;
unsigned long device_type;
- int ret;
+ int ret, num_sec_devs;
sec_pmic = devm_kzalloc(&i2c->dev, sizeof(struct sec_pmic_dev),
GFP_KERNEL);
@@ -297,6 +334,9 @@ static int sec_pmic_probe(struct i2c_client *i2c,
case S5M8767X:
regmap = &s5m8767_regmap_config;
break;
+ case S2MPU02:
+ regmap = &s2mpu02_regmap_config;
+ break;
default:
regmap = &sec_regmap_config;
break;
@@ -319,34 +359,39 @@ static int sec_pmic_probe(struct i2c_client *i2c,
switch (sec_pmic->device_type) {
case S5M8751X:
- ret = mfd_add_devices(sec_pmic->dev, -1, s5m8751_devs,
- ARRAY_SIZE(s5m8751_devs), NULL, 0, NULL);
+ sec_devs = s5m8751_devs;
+ num_sec_devs = ARRAY_SIZE(s5m8751_devs);
break;
case S5M8763X:
- ret = mfd_add_devices(sec_pmic->dev, -1, s5m8763_devs,
- ARRAY_SIZE(s5m8763_devs), NULL, 0, NULL);
+ sec_devs = s5m8763_devs;
+ num_sec_devs = ARRAY_SIZE(s5m8763_devs);
break;
case S5M8767X:
- ret = mfd_add_devices(sec_pmic->dev, -1, s5m8767_devs,
- ARRAY_SIZE(s5m8767_devs), NULL, 0, NULL);
+ sec_devs = s5m8767_devs;
+ num_sec_devs = ARRAY_SIZE(s5m8767_devs);
break;
case S2MPA01:
- ret = mfd_add_devices(sec_pmic->dev, -1, s2mpa01_devs,
- ARRAY_SIZE(s2mpa01_devs), NULL, 0, NULL);
+ sec_devs = s2mpa01_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpa01_devs);
break;
case S2MPS11X:
- ret = mfd_add_devices(sec_pmic->dev, -1, s2mps11_devs,
- ARRAY_SIZE(s2mps11_devs), NULL, 0, NULL);
+ sec_devs = s2mps11_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps11_devs);
break;
case S2MPS14X:
- ret = mfd_add_devices(sec_pmic->dev, -1, s2mps14_devs,
- ARRAY_SIZE(s2mps14_devs), NULL, 0, NULL);
+ sec_devs = s2mps14_devs;
+ num_sec_devs = ARRAY_SIZE(s2mps14_devs);
+ break;
+ case S2MPU02:
+ sec_devs = s2mpu02_devs;
+ num_sec_devs = ARRAY_SIZE(s2mpu02_devs);
break;
default:
/* If this happens the probe function is problem */
BUG();
}
-
+ ret = mfd_add_devices(sec_pmic->dev, -1, sec_devs, num_sec_devs, NULL,
+ 0, NULL);
if (ret)
goto err_mfd;
@@ -387,6 +432,15 @@ static int sec_pmic_suspend(struct device *dev)
*/
disable_irq(sec_pmic->irq);
+ switch (sec_pmic->device_type) {
+ case S2MPS14X:
+ case S2MPU02:
+ regulator_suspend_prepare(PM_SUSPEND_MEM);
+ break;
+ default:
+ break;
+ }
+
return 0;
}
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 654e2c1dbf7a..f9a57869e3ec 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -20,6 +20,7 @@
#include <linux/mfd/samsung/irq.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps14.h>
+#include <linux/mfd/samsung/s2mpu02.h>
#include <linux/mfd/samsung/s5m8763.h>
#include <linux/mfd/samsung/s5m8767.h>
@@ -161,6 +162,77 @@ static const struct regmap_irq s2mps14_irqs[] = {
},
};
+static const struct regmap_irq s2mpu02_irqs[] = {
+ [S2MPU02_IRQ_PWRONF] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_PWRONF_MASK,
+ },
+ [S2MPU02_IRQ_PWRONR] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_PWRONR_MASK,
+ },
+ [S2MPU02_IRQ_JIGONBF] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_JIGONBF_MASK,
+ },
+ [S2MPU02_IRQ_JIGONBR] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_JIGONBR_MASK,
+ },
+ [S2MPU02_IRQ_ACOKBF] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_ACOKBF_MASK,
+ },
+ [S2MPU02_IRQ_ACOKBR] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_ACOKBR_MASK,
+ },
+ [S2MPU02_IRQ_PWRON1S] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_PWRON1S_MASK,
+ },
+ [S2MPU02_IRQ_MRB] = {
+ .reg_offset = 0,
+ .mask = S2MPS11_IRQ_MRB_MASK,
+ },
+ [S2MPU02_IRQ_RTC60S] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTC60S_MASK,
+ },
+ [S2MPU02_IRQ_RTCA1] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTCA1_MASK,
+ },
+ [S2MPU02_IRQ_RTCA0] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTCA0_MASK,
+ },
+ [S2MPU02_IRQ_SMPL] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_SMPL_MASK,
+ },
+ [S2MPU02_IRQ_RTC1S] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_RTC1S_MASK,
+ },
+ [S2MPU02_IRQ_WTSR] = {
+ .reg_offset = 1,
+ .mask = S2MPS11_IRQ_WTSR_MASK,
+ },
+ [S2MPU02_IRQ_INT120C] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_INT120C_MASK,
+ },
+ [S2MPU02_IRQ_INT140C] = {
+ .reg_offset = 2,
+ .mask = S2MPS11_IRQ_INT140C_MASK,
+ },
+ [S2MPU02_IRQ_TSD] = {
+ .reg_offset = 2,
+ .mask = S2MPS14_IRQ_TSD_MASK,
+ },
+};
+
static const struct regmap_irq s5m8767_irqs[] = {
[S5M8767_IRQ_PWRR] = {
.reg_offset = 0,
@@ -327,6 +399,16 @@ static const struct regmap_irq_chip s2mps14_irq_chip = {
.ack_base = S2MPS14_REG_INT1,
};
+static const struct regmap_irq_chip s2mpu02_irq_chip = {
+ .name = "s2mpu02",
+ .irqs = s2mpu02_irqs,
+ .num_irqs = ARRAY_SIZE(s2mpu02_irqs),
+ .num_regs = 3,
+ .status_base = S2MPU02_REG_INT1,
+ .mask_base = S2MPU02_REG_INT1M,
+ .ack_base = S2MPU02_REG_INT1,
+};
+
static const struct regmap_irq_chip s5m8767_irq_chip = {
.name = "s5m8767",
.irqs = s5m8767_irqs,
@@ -351,6 +433,7 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
{
int ret = 0;
int type = sec_pmic->device_type;
+ const struct regmap_irq_chip *sec_irq_chip;
if (!sec_pmic->irq) {
dev_warn(sec_pmic->dev,
@@ -361,28 +444,19 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
switch (type) {
case S5M8763X:
- ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- sec_pmic->irq_base, &s5m8763_irq_chip,
- &sec_pmic->irq_data);
+ sec_irq_chip = &s5m8763_irq_chip;
break;
case S5M8767X:
- ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- sec_pmic->irq_base, &s5m8767_irq_chip,
- &sec_pmic->irq_data);
+ sec_irq_chip = &s5m8767_irq_chip;
break;
case S2MPS11X:
- ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- sec_pmic->irq_base, &s2mps11_irq_chip,
- &sec_pmic->irq_data);
+ sec_irq_chip = &s2mps11_irq_chip;
break;
case S2MPS14X:
- ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- sec_pmic->irq_base, &s2mps14_irq_chip,
- &sec_pmic->irq_data);
+ sec_irq_chip = &s2mps14_irq_chip;
+ break;
+ case S2MPU02:
+ sec_irq_chip = &s2mpu02_irq_chip;
break;
default:
dev_err(sec_pmic->dev, "Unknown device type %lu\n",
@@ -390,6 +464,10 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return -EINVAL;
}
+ ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ sec_pmic->irq_base, sec_irq_chip,
+ &sec_pmic->irq_data);
if (ret != 0) {
dev_err(sec_pmic->dev, "Failed to register IRQ chip: %d\n", ret);
return ret;
diff --git a/drivers/mfd/si476x-cmd.c b/drivers/mfd/si476x-cmd.c
index 6f1ef63086c9..2086b4665288 100644
--- a/drivers/mfd/si476x-cmd.c
+++ b/drivers/mfd/si476x-cmd.c
@@ -1228,8 +1228,8 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core,
}
static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core,
- struct si476x_rsq_status_args *rsqargs,
- struct si476x_rsq_status_report *report)
+ struct si476x_rsq_status_args *rsqargs,
+ struct si476x_rsq_status_report *report)
{
int err;
u8 resp[CMD_FM_RSQ_STATUS_A10_NRESP];
@@ -1434,10 +1434,10 @@ typedef int (*tune_freq_func_t) (struct si476x_core *core,
struct si476x_tune_freq_args *tuneargs);
static struct {
- int (*power_up) (struct si476x_core *,
- struct si476x_power_up_args *);
- int (*power_down) (struct si476x_core *,
- struct si476x_power_down_args *);
+ int (*power_up)(struct si476x_core *,
+ struct si476x_power_up_args *);
+ int (*power_down)(struct si476x_core *,
+ struct si476x_power_down_args *);
tune_freq_func_t fm_tune_freq;
tune_freq_func_t am_tune_freq;
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index a45f9c0a330a..5c054031c3f8 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -68,7 +68,7 @@ MODULE_DEVICE_TABLE(of, stmpe_of_match);
static int
stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
- int partnum;
+ enum stmpe_partnum partnum;
const struct of_device_id *of_id;
i2c_ci.data = (void *)id;
@@ -85,7 +85,7 @@ stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
dev_info(&i2c->dev, "matching on node name, compatible is preferred\n");
partnum = id->driver_data;
} else
- partnum = (int)of_id->data;
+ partnum = (enum stmpe_partnum)of_id->data;
return stmpe_probe(&i2c_ci, partnum);
}
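/*
 * Editorial sketch: of_device_id.data is a pointer, so narrowing it
 * through (int) truncates on 64-bit and provokes compiler warnings;
 * the hunk above keeps it pointer-sized until the final enum
 * conversion.  A generic form of the idiom, with illustrative names:
 */
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/types.h>

enum example_part { EXAMPLE_PART_A, EXAMPLE_PART_B };

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,part-a", .data = (void *)EXAMPLE_PART_A },
	{ .compatible = "vendor,part-b", .data = (void *)EXAMPLE_PART_B },
	{ }
};

static enum example_part example_get_part(struct device *dev)
{
	const struct of_device_id *of_id =
		of_match_device(example_of_match, dev);

	/* widen via uintptr_t first; never narrow the pointer through int */
	return of_id ? (enum example_part)(uintptr_t)of_id->data
		     : EXAMPLE_PART_A;
}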
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 3b6bfa7184ad..02a17c388e87 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1147,7 +1147,7 @@ static void stmpe_of_probe(struct stmpe_platform_data *pdata,
}
/* Called from client specific probe routines */
-int stmpe_probe(struct stmpe_client_info *ci, int partnum)
+int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum)
{
struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
struct device_node *np = ci->dev->of_node;
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 9e4d21d37a11..2d045f26f193 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -97,7 +97,7 @@ struct stmpe_client_info {
void (*init)(struct stmpe *stmpe);
};
-int stmpe_probe(struct stmpe_client_info *ci, int partnum);
+int stmpe_probe(struct stmpe_client_info *ci, enum stmpe_partnum partnum);
int stmpe_remove(struct stmpe *stmpe);
#define STMPE_ICR_LSB_HIGH (1 << 2)
diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c
index 718fc4d2adc0..283ab8d197e4 100644
--- a/drivers/mfd/sun6i-prcm.c
+++ b/drivers/mfd/sun6i-prcm.c
@@ -76,16 +76,46 @@ static const struct mfd_cell sun6i_a31_prcm_subdevs[] = {
},
};
+static const struct mfd_cell sun8i_a23_prcm_subdevs[] = {
+ {
+ .name = "sun8i-a23-apb0-clk",
+ .of_compatible = "allwinner,sun8i-a23-apb0-clk",
+ .num_resources = ARRAY_SIZE(sun6i_a31_apb0_clk_res),
+ .resources = sun6i_a31_apb0_clk_res,
+ },
+ {
+ .name = "sun6i-a31-apb0-gates-clk",
+ .of_compatible = "allwinner,sun8i-a23-apb0-gates-clk",
+ .num_resources = ARRAY_SIZE(sun6i_a31_apb0_gates_clk_res),
+ .resources = sun6i_a31_apb0_gates_clk_res,
+ },
+ {
+ .name = "sun6i-a31-apb0-clock-reset",
+ .of_compatible = "allwinner,sun6i-a31-clock-reset",
+ .num_resources = ARRAY_SIZE(sun6i_a31_apb0_rstc_res),
+ .resources = sun6i_a31_apb0_rstc_res,
+ },
+};
+
static const struct prcm_data sun6i_a31_prcm_data = {
.nsubdevs = ARRAY_SIZE(sun6i_a31_prcm_subdevs),
.subdevs = sun6i_a31_prcm_subdevs,
};
+static const struct prcm_data sun8i_a23_prcm_data = {
+ .nsubdevs = ARRAY_SIZE(sun8i_a23_prcm_subdevs),
+ .subdevs = sun8i_a23_prcm_subdevs,
+};
+
static const struct of_device_id sun6i_prcm_dt_ids[] = {
{
.compatible = "allwinner,sun6i-a31-prcm",
.data = &sun6i_a31_prcm_data,
},
+ {
+ .compatible = "allwinner,sun8i-a23-prcm",
+ .data = &sun8i_a23_prcm_data,
+ },
{ /* sentinel */ },
};
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index bd83accc0f6d..0072e668c208 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -236,7 +236,7 @@ static void tc3589x_irq_unmap(struct irq_domain *d, unsigned int virq)
static struct irq_domain_ops tc3589x_irq_ops = {
.map = tc3589x_irq_map,
.unmap = tc3589x_irq_unmap,
- .xlate = irq_domain_xlate_twocell,
+ .xlate = irq_domain_xlate_onecell,
};
static int tc3589x_irq_init(struct tc3589x *tc3589x, struct device_node *np)
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 591a331d8d83..e71f88000ae5 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -147,11 +147,10 @@ static int tc6387xb_probe(struct platform_device *dev)
int irq, ret;
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!iomem) {
+ if (!iomem)
return -EINVAL;
- }
- tc6387xb = kzalloc(sizeof *tc6387xb, GFP_KERNEL);
+ tc6387xb = kzalloc(sizeof(*tc6387xb), GFP_KERNEL);
if (!tc6387xb)
return -ENOMEM;
@@ -189,7 +188,7 @@ static int tc6387xb_probe(struct platform_device *dev)
if (pdata && pdata->enable)
pdata->enable(dev);
- printk(KERN_INFO "Toshiba tc6387xb initialised\n");
+ dev_info(&dev->dev, "Toshiba tc6387xb initialised\n");
ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells,
ARRAY_SIZE(tc6387xb_cells), iomem, irq, NULL);
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c
index b5dfa6e4e692..5de95c265c1a 100644
--- a/drivers/mfd/tps6105x.c
+++ b/drivers/mfd/tps6105x.c
@@ -141,7 +141,7 @@ static int tps6105x_probe(struct i2c_client *client,
int ret;
int i;
- tps6105x = kmalloc(sizeof(*tps6105x), GFP_KERNEL);
+ tps6105x = devm_kmalloc(&client->dev, sizeof(*tps6105x), GFP_KERNEL);
if (!tps6105x)
return -ENOMEM;
@@ -154,7 +154,7 @@ static int tps6105x_probe(struct i2c_client *client,
ret = tps6105x_startup(tps6105x);
if (ret) {
dev_err(&client->dev, "chip initialization failed\n");
- goto fail;
+ return ret;
}
/* Remove warning texts when you implement new cell drivers */
@@ -187,16 +187,8 @@ static int tps6105x_probe(struct i2c_client *client,
tps6105x_cells[i].pdata_size = sizeof(*tps6105x);
}
- ret = mfd_add_devices(&client->dev, 0, tps6105x_cells,
- ARRAY_SIZE(tps6105x_cells), NULL, 0, NULL);
- if (ret)
- goto fail;
-
- return 0;
-
-fail:
- kfree(tps6105x);
- return ret;
+ return mfd_add_devices(&client->dev, 0, tps6105x_cells,
+ ARRAY_SIZE(tps6105x_cells), NULL, 0, NULL);
}
static int tps6105x_remove(struct i2c_client *client)
@@ -210,7 +202,6 @@ static int tps6105x_remove(struct i2c_client *client)
TPS6105X_REG0_MODE_MASK,
TPS6105X_MODE_SHUTDOWN << TPS6105X_REG0_MODE_SHIFT);
- kfree(tps6105x);
return 0;
}
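/*
 * Editorial sketch: devm_kmalloc() above ties the allocation to the
 * i2c client's lifetime, which is why the "fail:" label and the
 * kfree() in remove() disappear.  Condensed pattern with illustrative
 * names (example_hw_init() stands in for real chip setup):
 */
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct example_chip {
	struct i2c_client *client;
};

static int example_hw_init(struct example_chip *chip)
{
	return 0;	/* placeholder for real register setup */
}

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct example_chip *chip;
	int ret;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->client = client;
	i2c_set_clientdata(client, chip);

	ret = example_hw_init(chip);
	if (ret)
		return ret;	/* no kfree needed: devm frees on failure */

	return 0;
}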
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index f9e42ea1cb1a..f243e75d28f3 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -387,7 +387,7 @@ static const struct of_device_id tps65910_of_match[] = {
MODULE_DEVICE_TABLE(of, tps65910_of_match);
static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
- int *chip_id)
+ unsigned long *chip_id)
{
struct device_node *np = client->dev.of_node;
struct tps65910_board *board_info;
@@ -401,7 +401,7 @@ static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
return NULL;
}
- *chip_id = (int)match->data;
+ *chip_id = (unsigned long)match->data;
board_info = devm_kzalloc(&client->dev, sizeof(*board_info),
GFP_KERNEL);
@@ -431,7 +431,7 @@ static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
#else
static inline
struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
- int *chip_id)
+ unsigned long *chip_id)
{
return NULL;
}
@@ -453,14 +453,14 @@ static void tps65910_power_off(void)
}
static int tps65910_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
struct tps65910 *tps65910;
struct tps65910_board *pmic_plat_data;
struct tps65910_board *of_pmic_plat_data = NULL;
struct tps65910_platform_data *init_data;
+ unsigned long chip_id = id->driver_data;
int ret = 0;
- int chip_id = id->driver_data;
pmic_plat_data = dev_get_platdata(&i2c->dev);
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index 69a5178bf152..de60ad98bd9f 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -32,10 +32,9 @@ static int tps65912_spi_write(struct tps65912 *tps65912, u8 addr,
unsigned long spi_data = 1 << 23 | addr << 15 | *data;
struct spi_transfer xfer;
struct spi_message msg;
- u32 tx_buf, rx_buf;
+ u32 tx_buf;
tx_buf = spi_data;
- rx_buf = 0;
xfer.tx_buf = &tx_buf;
xfer.rx_buf = NULL;
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 596b1f657e21..b1dabba763cf 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -297,7 +297,7 @@ static irqreturn_t handle_twl4030_pih(int irq, void *devid)
ret = twl_i2c_read_u8(TWL_MODULE_PIH, &pih_isr,
REG_PIH_ISR_P1);
if (ret) {
- pr_warning("twl4030: I2C error %d reading PIH ISR\n", ret);
+ pr_warn("twl4030: I2C error %d reading PIH ISR\n", ret);
return IRQ_NONE;
}
@@ -338,7 +338,7 @@ static int twl4030_init_sih_modules(unsigned line)
irq_line = line;
/* disable all interrupts on our line */
- memset(buf, 0xff, sizeof buf);
+ memset(buf, 0xff, sizeof(buf));
sih = sih_modules;
for (i = 0; i < nr_sih_modules; i++, sih++) {
/* skip USB -- it's funky */
@@ -646,7 +646,7 @@ int twl4030_sih_setup(struct device *dev, int module, int irq_base)
if (status < 0)
return status;
- agent = kzalloc(sizeof *agent, GFP_KERNEL);
+ agent = kzalloc(sizeof(*agent), GFP_KERNEL);
if (!agent)
return -ENOMEM;
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index a6bb17d908b8..2807e1a95663 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -70,7 +70,7 @@ static int twl6030_interrupt_mapping[24] = {
BATDETECT_INTR_OFFSET, /* Bit 9 BAT */
SIMDETECT_INTR_OFFSET, /* Bit 10 SIM */
MMCDETECT_INTR_OFFSET, /* Bit 11 MMC */
- RSV_INTR_OFFSET, /* Bit 12 Reserved */
+ RSV_INTR_OFFSET, /* Bit 12 Reserved */
MADC_INTR_OFFSET, /* Bit 13 GPADC_RT_EOC */
MADC_INTR_OFFSET, /* Bit 14 GPADC_SW_EOC */
GASGAUGE_INTR_OFFSET, /* Bit 15 CC_AUTOCAL */
@@ -245,6 +245,7 @@ int twl6030_interrupt_unmask(u8 bit_mask, u8 offset)
{
int ret;
u8 unmask_value;
+
ret = twl_i2c_read_u8(TWL_MODULE_PIH, &unmask_value,
REG_INT_STS_A + offset);
unmask_value &= (~(bit_mask));
@@ -258,6 +259,7 @@ int twl6030_interrupt_mask(u8 bit_mask, u8 offset)
{
int ret;
u8 mask_value;
+
ret = twl_i2c_read_u8(TWL_MODULE_PIH, &mask_value,
REG_INT_STS_A + offset);
mask_value |= (bit_mask);
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index ae26d84b3a59..f9c06c542a41 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -700,7 +700,7 @@ static int twl6040_probe(struct i2c_client *client,
}
ret = regmap_add_irq_chip(twl6040->regmap, twl6040->irq, IRQF_ONESHOT,
- 0, &twl6040_irq_chip,&twl6040->irq_data);
+ 0, &twl6040_irq_chip, &twl6040->irq_data);
if (ret < 0)
goto gpio_err;
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index c8a993bd17ae..fb4d4bb0f47d 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -138,11 +138,11 @@ static const struct regmap_irq wm5102_irqs[ARIZONA_NUM_IRQ] = {
.reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1
},
- [ARIZONA_IRQ_SPK_SHUTDOWN_WARN] = {
- .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_WARN_EINT1
+ [ARIZONA_IRQ_SPK_OVERHEAT_WARN] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1
},
- [ARIZONA_IRQ_SPK_SHUTDOWN] = {
- .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_EINT1
+ [ARIZONA_IRQ_SPK_OVERHEAT] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1
},
[ARIZONA_IRQ_HPDET] = {
.reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 41a7f6fb7802..9b98ee559188 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -340,11 +340,11 @@ static const struct regmap_irq wm5110_irqs[ARIZONA_NUM_IRQ] = {
.reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1
},
- [ARIZONA_IRQ_SPK_SHUTDOWN_WARN] = {
- .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_WARN_EINT1
+ [ARIZONA_IRQ_SPK_OVERHEAT_WARN] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1
},
- [ARIZONA_IRQ_SPK_SHUTDOWN] = {
- .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_EINT1
+ [ARIZONA_IRQ_SPK_OVERHEAT] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1
},
[ARIZONA_IRQ_HPDET] = {
.reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
@@ -416,16 +416,28 @@ static const struct regmap_irq wm5110_irqs[ARIZONA_NUM_IRQ] = {
[ARIZONA_IRQ_ISRC2_CFG_ERR] = {
.reg_offset = 3, .mask = ARIZONA_ISRC2_CFG_ERR_EINT1
},
+ [ARIZONA_IRQ_HP3R_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP3R_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP3L_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP3L_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP2R_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP2R_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP2L_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP2L_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP1R_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP1R_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP1L_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP1L_DONE_EINT1
+ },
[ARIZONA_IRQ_BOOT_DONE] = {
.reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
},
- [ARIZONA_IRQ_DCS_DAC_DONE] = {
- .reg_offset = 4, .mask = ARIZONA_DCS_DAC_DONE_EINT1
- },
- [ARIZONA_IRQ_DCS_HP_DONE] = {
- .reg_offset = 4, .mask = ARIZONA_DCS_HP_DONE_EINT1
- },
[ARIZONA_IRQ_FLL2_CLOCK_OK] = {
.reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
},
@@ -445,6 +457,209 @@ const struct regmap_irq_chip wm5110_irq = {
};
EXPORT_SYMBOL_GPL(wm5110_irq);
+static const struct regmap_irq wm5110_revd_irqs[ARIZONA_NUM_IRQ] = {
+ [ARIZONA_IRQ_GP4] = { .reg_offset = 0, .mask = ARIZONA_GP4_EINT1 },
+ [ARIZONA_IRQ_GP3] = { .reg_offset = 0, .mask = ARIZONA_GP3_EINT1 },
+ [ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
+ [ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },
+
+ [ARIZONA_IRQ_DSP4_RAM_RDY] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP4_RAM_RDY_EINT1
+ },
+ [ARIZONA_IRQ_DSP3_RAM_RDY] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP3_RAM_RDY_EINT1
+ },
+ [ARIZONA_IRQ_DSP2_RAM_RDY] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP2_RAM_RDY_EINT1
+ },
+ [ARIZONA_IRQ_DSP1_RAM_RDY] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP1_RAM_RDY_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ8] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ8_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ7] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ7_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ6] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ6_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ5] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ5_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ4] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ4_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ3] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ3_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ2] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ2_EINT1
+ },
+ [ARIZONA_IRQ_DSP_IRQ1] = {
+ .reg_offset = 1, .mask = ARIZONA_DSP_IRQ1_EINT1
+ },
+
+ [ARIZONA_IRQ_SPK_OVERHEAT_WARN] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1
+ },
+ [ARIZONA_IRQ_SPK_OVERHEAT] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1
+ },
+ [ARIZONA_IRQ_HPDET] = {
+ .reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
+ },
+ [ARIZONA_IRQ_MICDET] = {
+ .reg_offset = 2, .mask = ARIZONA_MICDET_EINT1
+ },
+ [ARIZONA_IRQ_WSEQ_DONE] = {
+ .reg_offset = 2, .mask = ARIZONA_WSEQ_DONE_EINT1
+ },
+ [ARIZONA_IRQ_DRC2_SIG_DET] = {
+ .reg_offset = 2, .mask = ARIZONA_DRC2_SIG_DET_EINT1
+ },
+ [ARIZONA_IRQ_DRC1_SIG_DET] = {
+ .reg_offset = 2, .mask = ARIZONA_DRC1_SIG_DET_EINT1
+ },
+ [ARIZONA_IRQ_ASRC2_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_ASRC2_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_ASRC1_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_ASRC1_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_UNDERCLOCKED] = {
+ .reg_offset = 2, .mask = ARIZONA_UNDERCLOCKED_EINT1
+ },
+ [ARIZONA_IRQ_OVERCLOCKED] = {
+ .reg_offset = 2, .mask = ARIZONA_OVERCLOCKED_EINT1
+ },
+ [ARIZONA_IRQ_FLL2_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_FLL2_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_FLL1_LOCK] = {
+ .reg_offset = 2, .mask = ARIZONA_FLL1_LOCK_EINT1
+ },
+ [ARIZONA_IRQ_CLKGEN_ERR] = {
+ .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_EINT1
+ },
+ [ARIZONA_IRQ_CLKGEN_ERR_ASYNC] = {
+ .reg_offset = 2, .mask = ARIZONA_CLKGEN_ERR_ASYNC_EINT1
+ },
+
+ [ARIZONA_IRQ_CTRLIF_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_V2_CTRLIF_ERR_EINT1
+ },
+ [ARIZONA_IRQ_MIXER_DROPPED_SAMPLES] = {
+ .reg_offset = 3, .mask = ARIZONA_V2_MIXER_DROPPED_SAMPLE_EINT1
+ },
+ [ARIZONA_IRQ_ASYNC_CLK_ENA_LOW] = {
+ .reg_offset = 3, .mask = ARIZONA_V2_ASYNC_CLK_ENA_LOW_EINT1
+ },
+ [ARIZONA_IRQ_SYSCLK_ENA_LOW] = {
+ .reg_offset = 3, .mask = ARIZONA_V2_SYSCLK_ENA_LOW_EINT1
+ },
+ [ARIZONA_IRQ_ISRC1_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_V2_ISRC1_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_ISRC2_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_V2_ISRC2_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_ISRC3_CFG_ERR] = {
+ .reg_offset = 3, .mask = ARIZONA_V2_ISRC3_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_HP3R_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP3R_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP3L_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP3L_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP2R_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP2R_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP2L_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP2L_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP1R_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP1R_DONE_EINT1
+ },
+ [ARIZONA_IRQ_HP1L_DONE] = {
+ .reg_offset = 3, .mask = ARIZONA_HP1L_DONE_EINT1
+ },
+
+ [ARIZONA_IRQ_BOOT_DONE] = {
+ .reg_offset = 4, .mask = ARIZONA_BOOT_DONE_EINT1
+ },
+ [ARIZONA_IRQ_ASRC_CFG_ERR] = {
+ .reg_offset = 4, .mask = ARIZONA_V2_ASRC_CFG_ERR_EINT1
+ },
+ [ARIZONA_IRQ_FLL2_CLOCK_OK] = {
+ .reg_offset = 4, .mask = ARIZONA_FLL2_CLOCK_OK_EINT1
+ },
+ [ARIZONA_IRQ_FLL1_CLOCK_OK] = {
+ .reg_offset = 4, .mask = ARIZONA_FLL1_CLOCK_OK_EINT1
+ },
+
+ [ARIZONA_IRQ_DSP_SHARED_WR_COLL] = {
+ .reg_offset = 5, .mask = ARIZONA_DSP_SHARED_WR_COLL_EINT1
+ },
+ [ARIZONA_IRQ_SPK_SHUTDOWN] = {
+ .reg_offset = 5, .mask = ARIZONA_SPK_SHUTDOWN_EINT1
+ },
+ [ARIZONA_IRQ_SPK1R_SHORT] = {
+ .reg_offset = 5, .mask = ARIZONA_SPK1R_SHORT_EINT1
+ },
+ [ARIZONA_IRQ_SPK1L_SHORT] = {
+ .reg_offset = 5, .mask = ARIZONA_SPK1L_SHORT_EINT1
+ },
+ [ARIZONA_IRQ_HP3R_SC_NEG] = {
+ .reg_offset = 5, .mask = ARIZONA_HP3R_SC_NEG_EINT1
+ },
+ [ARIZONA_IRQ_HP3R_SC_POS] = {
+ .reg_offset = 5, .mask = ARIZONA_HP3R_SC_POS_EINT1
+ },
+ [ARIZONA_IRQ_HP3L_SC_NEG] = {
+ .reg_offset = 5, .mask = ARIZONA_HP3L_SC_NEG_EINT1
+ },
+ [ARIZONA_IRQ_HP3L_SC_POS] = {
+ .reg_offset = 5, .mask = ARIZONA_HP3L_SC_POS_EINT1
+ },
+ [ARIZONA_IRQ_HP2R_SC_NEG] = {
+ .reg_offset = 5, .mask = ARIZONA_HP2R_SC_NEG_EINT1
+ },
+ [ARIZONA_IRQ_HP2R_SC_POS] = {
+ .reg_offset = 5, .mask = ARIZONA_HP2R_SC_POS_EINT1
+ },
+ [ARIZONA_IRQ_HP2L_SC_NEG] = {
+ .reg_offset = 5, .mask = ARIZONA_HP2L_SC_NEG_EINT1
+ },
+ [ARIZONA_IRQ_HP2L_SC_POS] = {
+ .reg_offset = 5, .mask = ARIZONA_HP2L_SC_POS_EINT1
+ },
+ [ARIZONA_IRQ_HP1R_SC_NEG] = {
+ .reg_offset = 5, .mask = ARIZONA_HP1R_SC_NEG_EINT1
+ },
+ [ARIZONA_IRQ_HP1R_SC_POS] = {
+ .reg_offset = 5, .mask = ARIZONA_HP1R_SC_POS_EINT1
+ },
+ [ARIZONA_IRQ_HP1L_SC_NEG] = {
+ .reg_offset = 5, .mask = ARIZONA_HP1L_SC_NEG_EINT1
+ },
+ [ARIZONA_IRQ_HP1L_SC_POS] = {
+ .reg_offset = 5, .mask = ARIZONA_HP1L_SC_POS_EINT1
+ },
+};
+
+const struct regmap_irq_chip wm5110_revd_irq = {
+ .name = "wm5110 IRQ",
+ .status_base = ARIZONA_INTERRUPT_STATUS_1,
+ .mask_base = ARIZONA_INTERRUPT_STATUS_1_MASK,
+ .ack_base = ARIZONA_INTERRUPT_STATUS_1,
+ .num_regs = 6,
+ .irqs = wm5110_revd_irqs,
+ .num_irqs = ARRAY_SIZE(wm5110_revd_irqs),
+};
+EXPORT_SYMBOL_GPL(wm5110_revd_irq);
+
static const struct reg_default wm5110_reg_default[] = {
{ 0x00000008, 0x0019 }, /* R8 - Ctrl IF SPI CFG 1 */
{ 0x00000009, 0x0001 }, /* R9 - Ctrl IF I2C1 CFG 1 */
@@ -1274,12 +1489,14 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000D0A, 0xFFFF }, /* R3338 - Interrupt Status 3 Mask */
{ 0x00000D0B, 0xFFFF }, /* R3339 - Interrupt Status 4 Mask */
{ 0x00000D0C, 0xFEFF }, /* R3340 - Interrupt Status 5 Mask */
+ { 0x00000D0D, 0xFFFF }, /* R3341 - Interrupt Status 6 Mask */
{ 0x00000D0F, 0x0000 }, /* R3343 - Interrupt Control */
{ 0x00000D18, 0xFFFF }, /* R3352 - IRQ2 Status 1 Mask */
{ 0x00000D19, 0xFFFF }, /* R3353 - IRQ2 Status 2 Mask */
{ 0x00000D1A, 0xFFFF }, /* R3354 - IRQ2 Status 3 Mask */
{ 0x00000D1B, 0xFFFF }, /* R3355 - IRQ2 Status 4 Mask */
{ 0x00000D1C, 0xFFFF }, /* R3356 - IRQ2 Status 5 Mask */
+ { 0x00000D1D, 0xFFFF }, /* R3357 - IRQ2 Status 6 Mask */
{ 0x00000D1F, 0x0000 }, /* R3359 - IRQ2 Control */
{ 0x00000D53, 0xFFFF }, /* R3411 - AOD IRQ Mask IRQ1 */
{ 0x00000D54, 0xFFFF }, /* R3412 - AOD IRQ Mask IRQ2 */
@@ -2311,22 +2528,26 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_INTERRUPT_STATUS_3:
case ARIZONA_INTERRUPT_STATUS_4:
case ARIZONA_INTERRUPT_STATUS_5:
+ case ARIZONA_INTERRUPT_STATUS_6:
case ARIZONA_INTERRUPT_STATUS_1_MASK:
case ARIZONA_INTERRUPT_STATUS_2_MASK:
case ARIZONA_INTERRUPT_STATUS_3_MASK:
case ARIZONA_INTERRUPT_STATUS_4_MASK:
case ARIZONA_INTERRUPT_STATUS_5_MASK:
+ case ARIZONA_INTERRUPT_STATUS_6_MASK:
case ARIZONA_INTERRUPT_CONTROL:
case ARIZONA_IRQ2_STATUS_1:
case ARIZONA_IRQ2_STATUS_2:
case ARIZONA_IRQ2_STATUS_3:
case ARIZONA_IRQ2_STATUS_4:
case ARIZONA_IRQ2_STATUS_5:
+ case ARIZONA_IRQ2_STATUS_6:
case ARIZONA_IRQ2_STATUS_1_MASK:
case ARIZONA_IRQ2_STATUS_2_MASK:
case ARIZONA_IRQ2_STATUS_3_MASK:
case ARIZONA_IRQ2_STATUS_4_MASK:
case ARIZONA_IRQ2_STATUS_5_MASK:
+ case ARIZONA_IRQ2_STATUS_6_MASK:
case ARIZONA_IRQ2_CONTROL:
case ARIZONA_INTERRUPT_RAW_STATUS_2:
case ARIZONA_INTERRUPT_RAW_STATUS_3:
@@ -2335,6 +2556,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_INTERRUPT_RAW_STATUS_6:
case ARIZONA_INTERRUPT_RAW_STATUS_7:
case ARIZONA_INTERRUPT_RAW_STATUS_8:
+ case ARIZONA_INTERRUPT_RAW_STATUS_9:
case ARIZONA_IRQ_PIN_STATUS:
case ARIZONA_AOD_WKUP_AND_TRIG:
case ARIZONA_AOD_IRQ1:
@@ -2610,11 +2832,13 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_INTERRUPT_STATUS_3:
case ARIZONA_INTERRUPT_STATUS_4:
case ARIZONA_INTERRUPT_STATUS_5:
+ case ARIZONA_INTERRUPT_STATUS_6:
case ARIZONA_IRQ2_STATUS_1:
case ARIZONA_IRQ2_STATUS_2:
case ARIZONA_IRQ2_STATUS_3:
case ARIZONA_IRQ2_STATUS_4:
case ARIZONA_IRQ2_STATUS_5:
+ case ARIZONA_IRQ2_STATUS_6:
case ARIZONA_INTERRUPT_RAW_STATUS_2:
case ARIZONA_INTERRUPT_RAW_STATUS_3:
case ARIZONA_INTERRUPT_RAW_STATUS_4:
@@ -2622,6 +2846,7 @@ static bool wm5110_volatile_register(struct device *dev, unsigned int reg)
case ARIZONA_INTERRUPT_RAW_STATUS_6:
case ARIZONA_INTERRUPT_RAW_STATUS_7:
case ARIZONA_INTERRUPT_RAW_STATUS_8:
+ case ARIZONA_INTERRUPT_RAW_STATUS_9:
case ARIZONA_IRQ_PIN_STATUS:
case ARIZONA_AOD_WKUP_AND_TRIG:
case ARIZONA_AOD_IRQ1:
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index f919def05e24..6a16a8a6f9fa 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -58,10 +58,10 @@ static int wm8350_i2c_remove(struct i2c_client *i2c)
}
static const struct i2c_device_id wm8350_i2c_id[] = {
- { "wm8350", 0 },
- { "wm8351", 0 },
- { "wm8352", 0 },
- { }
+ { "wm8350", 0 },
+ { "wm8351", 0 },
+ { "wm8352", 0 },
+ { }
};
MODULE_DEVICE_TABLE(i2c, wm8350_i2c_id);
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index cd01f7962dfd..813ff50f95b6 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -497,7 +497,8 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
if (pdata && pdata->irq_base > 0)
irq_base = pdata->irq_base;
- wm8350->irq_base = irq_alloc_descs(irq_base, 0, ARRAY_SIZE(wm8350_irqs), 0);
+ wm8350->irq_base =
+ irq_alloc_descs(irq_base, 0, ARRAY_SIZE(wm8350_irqs), 0);
if (wm8350->irq_base < 0) {
dev_warn(wm8350->dev, "Allocating irqs failed with %d\n",
wm8350->irq_base);
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index 2fbce9c5950b..770a25696468 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -123,14 +123,23 @@ static struct reg_default wm1811_defaults[] = {
{ 0x0402, 0x00C0 }, /* R1026 - AIF1 DAC1 Left Volume */
{ 0x0403, 0x00C0 }, /* R1027 - AIF1 DAC1 Right Volume */
{ 0x0410, 0x0000 }, /* R1040 - AIF1 ADC1 Filters */
+ { 0x0411, 0x0000 }, /* R1041 - AIF1 ADC2 Filters */
{ 0x0420, 0x0200 }, /* R1056 - AIF1 DAC1 Filters (1) */
{ 0x0421, 0x0010 }, /* R1057 - AIF1 DAC1 Filters (2) */
+ { 0x0422, 0x0200 }, /* R1058 - AIF1 DAC2 Filters (1) */
+ { 0x0423, 0x0010 }, /* R1059 - AIF1 DAC2 Filters (2) */
{ 0x0430, 0x0068 }, /* R1072 - AIF1 DAC1 Noise Gate */
+ { 0x0431, 0x0068 }, /* R1073 - AIF1 DAC2 Noise Gate */
{ 0x0440, 0x0098 }, /* R1088 - AIF1 DRC1 (1) */
{ 0x0441, 0x0845 }, /* R1089 - AIF1 DRC1 (2) */
{ 0x0442, 0x0000 }, /* R1090 - AIF1 DRC1 (3) */
{ 0x0443, 0x0000 }, /* R1091 - AIF1 DRC1 (4) */
{ 0x0444, 0x0000 }, /* R1092 - AIF1 DRC1 (5) */
+ { 0x0450, 0x0098 }, /* R1104 - AIF1 DRC2 (1) */
+ { 0x0451, 0x0845 }, /* R1105 - AIF1 DRC2 (2) */
+ { 0x0452, 0x0000 }, /* R1106 - AIF1 DRC2 (3) */
+ { 0x0453, 0x0000 }, /* R1107 - AIF1 DRC2 (4) */
+ { 0x0454, 0x0000 }, /* R1108 - AIF1 DRC2 (5) */
{ 0x0480, 0x6318 }, /* R1152 - AIF1 DAC1 EQ Gains (1) */
{ 0x0481, 0x6300 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */
{ 0x0482, 0x0FCA }, /* R1154 - AIF1 DAC1 EQ Band 1 A */
@@ -152,6 +161,27 @@ static struct reg_default wm1811_defaults[] = {
{ 0x0492, 0x0559 }, /* R1170 - AIF1 DAC1 EQ Band 5 B */
{ 0x0493, 0x4000 }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */
{ 0x0494, 0x0000 }, /* R1172 - AIF1 DAC1 EQ Band 1 C */
+ { 0x04A0, 0x6318 }, /* R1184 - AIF1 DAC2 EQ Gains (1) */
+ { 0x04A1, 0x6300 }, /* R1185 - AIF1 DAC2 EQ Gains (2) */
+ { 0x04A2, 0x0FCA }, /* R1186 - AIF1 DAC2 EQ Band 1 A */
+ { 0x04A3, 0x0400 }, /* R1187 - AIF1 DAC2 EQ Band 1 B */
+ { 0x04A4, 0x00D8 }, /* R1188 - AIF1 DAC2 EQ Band 1 PG */
+ { 0x04A5, 0x1EB5 }, /* R1189 - AIF1 DAC2 EQ Band 2 A */
+ { 0x04A6, 0xF145 }, /* R1190 - AIF1 DAC2 EQ Band 2 B */
+ { 0x04A7, 0x0B75 }, /* R1191 - AIF1 DAC2 EQ Band 2 C */
+ { 0x04A8, 0x01C5 }, /* R1192 - AIF1 DAC2 EQ Band 2 PG */
+ { 0x04A9, 0x1C58 }, /* R1193 - AIF1 DAC2 EQ Band 3 A */
+ { 0x04AA, 0xF373 }, /* R1194 - AIF1 DAC2 EQ Band 3 B */
+ { 0x04AB, 0x0A54 }, /* R1195 - AIF1 DAC2 EQ Band 3 C */
+ { 0x04AC, 0x0558 }, /* R1196 - AIF1 DAC2 EQ Band 3 PG */
+ { 0x04AD, 0x168E }, /* R1197 - AIF1 DAC2 EQ Band 4 A */
+ { 0x04AE, 0xF829 }, /* R1198 - AIF1 DAC2 EQ Band 4 B */
+ { 0x04AF, 0x07AD }, /* R1199 - AIF1 DAC2 EQ Band 4 C */
+ { 0x04B0, 0x1103 }, /* R1200 - AIF1 DAC2 EQ Band 4 PG */
+ { 0x04B1, 0x0564 }, /* R1201 - AIF1 DAC2 EQ Band 5 A */
+ { 0x04B2, 0x0559 }, /* R1202 - AIF1 DAC2 EQ Band 5 B */
+ { 0x04B3, 0x4000 }, /* R1203 - AIF1 DAC2 EQ Band 5 PG */
+ { 0x04B4, 0x0000 }, /* R1204 - AIF1 DAC2 EQ Band 1 C */
{ 0x0500, 0x00C0 }, /* R1280 - AIF2 ADC Left Volume */
{ 0x0501, 0x00C0 }, /* R1281 - AIF2 ADC Right Volume */
{ 0x0502, 0x00C0 }, /* R1282 - AIF2 DAC Left Volume */
@@ -194,6 +224,8 @@ static struct reg_default wm1811_defaults[] = {
{ 0x0605, 0x0000 }, /* R1541 - AIF2ADC Right Mixer Routing */
{ 0x0606, 0x0000 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */
{ 0x0607, 0x0000 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */
+ { 0x0608, 0x0000 }, /* R1544 - AIF1 ADC2 Left Mixer Routing */
+ { 0x0609, 0x0000 }, /* R1545 - AIF1 ADC2 Right Mixer Routing */
{ 0x0610, 0x02C0 }, /* R1552 - DAC1 Left Volume */
{ 0x0611, 0x02C0 }, /* R1553 - DAC1 Right Volume */
{ 0x0612, 0x02C0 }, /* R1554 - AIF2TX Left Volume */
@@ -846,14 +878,23 @@ static bool wm1811_readable_register(struct device *dev, unsigned int reg)
case WM8994_AIF1_DAC1_LEFT_VOLUME:
case WM8994_AIF1_DAC1_RIGHT_VOLUME:
case WM8994_AIF1_ADC1_FILTERS:
+ case WM8994_AIF1_ADC2_FILTERS:
case WM8994_AIF1_DAC1_FILTERS_1:
case WM8994_AIF1_DAC1_FILTERS_2:
+ case WM8994_AIF1_DAC2_FILTERS_1:
+ case WM8994_AIF1_DAC2_FILTERS_2:
case WM8958_AIF1_DAC1_NOISE_GATE:
+ case WM8958_AIF1_DAC2_NOISE_GATE:
case WM8994_AIF1_DRC1_1:
case WM8994_AIF1_DRC1_2:
case WM8994_AIF1_DRC1_3:
case WM8994_AIF1_DRC1_4:
case WM8994_AIF1_DRC1_5:
+ case WM8994_AIF1_DRC2_1:
+ case WM8994_AIF1_DRC2_2:
+ case WM8994_AIF1_DRC2_3:
+ case WM8994_AIF1_DRC2_4:
+ case WM8994_AIF1_DRC2_5:
case WM8994_AIF1_DAC1_EQ_GAINS_1:
case WM8994_AIF1_DAC1_EQ_GAINS_2:
case WM8994_AIF1_DAC1_EQ_BAND_1_A:
@@ -875,6 +916,27 @@ static bool wm1811_readable_register(struct device *dev, unsigned int reg)
case WM8994_AIF1_DAC1_EQ_BAND_5_B:
case WM8994_AIF1_DAC1_EQ_BAND_5_PG:
case WM8994_AIF1_DAC1_EQ_BAND_1_C:
+ case WM8994_AIF1_DAC2_EQ_GAINS_1:
+ case WM8994_AIF1_DAC2_EQ_GAINS_2:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_C:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_C:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_C:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_5_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_5_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_5_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_C:
case WM8994_AIF2_ADC_LEFT_VOLUME:
case WM8994_AIF2_ADC_RIGHT_VOLUME:
case WM8994_AIF2_DAC_LEFT_VOLUME:
@@ -917,6 +979,8 @@ static bool wm1811_readable_register(struct device *dev, unsigned int reg)
case WM8994_DAC2_RIGHT_MIXER_ROUTING:
case WM8994_AIF1_ADC1_LEFT_MIXER_ROUTING:
case WM8994_AIF1_ADC1_RIGHT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC2_LEFT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC2_RIGHT_MIXER_ROUTING:
case WM8994_DAC1_LEFT_VOLUME:
case WM8994_DAC1_RIGHT_VOLUME:
case WM8994_DAC2_LEFT_VOLUME:
diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c
index c7a81da64ee1..510da3b52324 100644
--- a/drivers/mfd/wm8997-tables.c
+++ b/drivers/mfd/wm8997-tables.c
@@ -65,11 +65,11 @@ static const struct regmap_irq wm8997_irqs[ARIZONA_NUM_IRQ] = {
[ARIZONA_IRQ_GP2] = { .reg_offset = 0, .mask = ARIZONA_GP2_EINT1 },
[ARIZONA_IRQ_GP1] = { .reg_offset = 0, .mask = ARIZONA_GP1_EINT1 },
- [ARIZONA_IRQ_SPK_SHUTDOWN_WARN] = {
- .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_WARN_EINT1
+ [ARIZONA_IRQ_SPK_OVERHEAT_WARN] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_WARN_EINT1
},
- [ARIZONA_IRQ_SPK_SHUTDOWN] = {
- .reg_offset = 2, .mask = ARIZONA_SPK_SHUTDOWN_EINT1
+ [ARIZONA_IRQ_SPK_OVERHEAT] = {
+ .reg_offset = 2, .mask = ARIZONA_SPK_OVERHEAT_EINT1
},
[ARIZONA_IRQ_HPDET] = {
.reg_offset = 2, .mask = ARIZONA_HPDET_EINT1
@@ -174,10 +174,10 @@ static const struct reg_default wm8997_reg_default[] = {
{ 0x00000062, 0x01FF }, /* R98 - Sample Rate Sequence Select 2 */
{ 0x00000063, 0x01FF }, /* R99 - Sample Rate Sequence Select 3 */
{ 0x00000064, 0x01FF }, /* R100 - Sample Rate Sequence Select 4 */
- { 0x00000068, 0x01FF }, /* R104 - Always On Triggers Sequence Select 3 */
- { 0x00000069, 0x01FF }, /* R105 - Always On Triggers Sequence Select 4 */
- { 0x0000006A, 0x01FF }, /* R106 - Always On Triggers Sequence Select 5 */
- { 0x0000006B, 0x01FF }, /* R107 - Always On Triggers Sequence Select 6 */
+ { 0x00000068, 0x01FF }, /* R104 - AlwaysOn Triggers Seq Select 3 */
+ { 0x00000069, 0x01FF }, /* R105 - AlwaysOn Triggers Seq Select 4 */
+ { 0x0000006A, 0x01FF }, /* R106 - AlwaysOn Triggers Seq Select 5 */
+ { 0x0000006B, 0x01FF }, /* R107 - AlwaysOn Triggers Seq Select 6 */
{ 0x00000070, 0x0000 }, /* R112 - Comfort Noise Generator */
{ 0x00000090, 0x0000 }, /* R144 - Haptics Control 1 */
{ 0x00000091, 0x7FFF }, /* R145 - Haptics Control 2 */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index ee9402324a23..b841180c7c74 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -51,16 +51,6 @@ config AD525X_DPOT_SPI
To compile this driver as a module, choose M here: the
module will be called ad525x_dpot-spi.
-config ATMEL_PWM
- tristate "Atmel AT32/AT91 PWM support"
- depends on HAVE_CLK
- depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
- help
- This option enables device driver support for the PWM channels
- on certain Atmel processors. Pulse Width Modulation is used for
- purposes including software controlled power-efficient backlights
- on LCD displays, motor control, and waveform generation.
-
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
depends on (AVR32 || ARCH_AT91)
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d59ce1261b38..5497d026e651 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
obj-$(CONFIG_INTEL_MID_PTI) += pti.o
-obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_BMP085) += bmp085.o
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 22de13727641..60843a275abd 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -83,10 +83,17 @@ EXPORT_SYMBOL(ssc_free);
static struct atmel_ssc_platform_data at91rm9200_config = {
.use_dma = 0,
+ .has_fslen_ext = 0,
+};
+
+static struct atmel_ssc_platform_data at91sam9rl_config = {
+ .use_dma = 0,
+ .has_fslen_ext = 1,
};
static struct atmel_ssc_platform_data at91sam9g45_config = {
.use_dma = 1,
+ .has_fslen_ext = 1,
};
static const struct platform_device_id atmel_ssc_devtypes[] = {
@@ -94,6 +101,9 @@ static const struct platform_device_id atmel_ssc_devtypes[] = {
.name = "at91rm9200_ssc",
.driver_data = (unsigned long) &at91rm9200_config,
}, {
+ .name = "at91sam9rl_ssc",
+ .driver_data = (unsigned long) &at91sam9rl_config,
+ }, {
.name = "at91sam9g45_ssc",
.driver_data = (unsigned long) &at91sam9g45_config,
}, {
@@ -107,6 +117,9 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
.compatible = "atmel,at91rm9200-ssc",
.data = &at91rm9200_config,
}, {
+ .compatible = "atmel,at91sam9rl-ssc",
+ .data = &at91sam9rl_config,
+ }, {
.compatible = "atmel,at91sam9g45-ssc",
.data = &at91sam9g45_config,
}, {
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c
deleted file mode 100644
index a6dc56e1bc58..000000000000
--- a/drivers/misc/atmel_pwm.c
+++ /dev/null
@@ -1,402 +0,0 @@
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/atmel_pwm.h>
-
-
-/*
- * This is a simple driver for the PWM controller found in various newer
- * Atmel SOCs, including the AVR32 series and the AT91sam9263.
- *
- * Chips with current Linux ports have only 4 PWM channels, out of max 32.
- * AT32UC3A and AT32UC3B chips have 7 channels (but currently no Linux).
- * Docs are inconsistent about the width of the channel counter registers;
- * it's at least 16 bits, but several places say 20 bits.
- */
-#define PWM_NCHAN 4 /* max 32 */
-
-struct pwm {
- spinlock_t lock;
- struct platform_device *pdev;
- u32 mask;
- int irq;
- void __iomem *base;
- struct clk *clk;
- struct pwm_channel *channel[PWM_NCHAN];
- void (*handler[PWM_NCHAN])(struct pwm_channel *);
-};
-
-
-/* global PWM controller registers */
-#define PWM_MR 0x00
-#define PWM_ENA 0x04
-#define PWM_DIS 0x08
-#define PWM_SR 0x0c
-#define PWM_IER 0x10
-#define PWM_IDR 0x14
-#define PWM_IMR 0x18
-#define PWM_ISR 0x1c
-
-static inline void pwm_writel(const struct pwm *p, unsigned offset, u32 val)
-{
- __raw_writel(val, p->base + offset);
-}
-
-static inline u32 pwm_readl(const struct pwm *p, unsigned offset)
-{
- return __raw_readl(p->base + offset);
-}
-
-static inline void __iomem *pwmc_regs(const struct pwm *p, int index)
-{
- return p->base + 0x200 + index * 0x20;
-}
-
-static struct pwm *pwm;
-
-static void pwm_dumpregs(struct pwm_channel *ch, char *tag)
-{
- struct device *dev = &pwm->pdev->dev;
-
- dev_dbg(dev, "%s: mr %08x, sr %08x, imr %08x\n",
- tag,
- pwm_readl(pwm, PWM_MR),
- pwm_readl(pwm, PWM_SR),
- pwm_readl(pwm, PWM_IMR));
- dev_dbg(dev,
- "pwm ch%d - mr %08x, dty %u, prd %u, cnt %u\n",
- ch->index,
- pwm_channel_readl(ch, PWM_CMR),
- pwm_channel_readl(ch, PWM_CDTY),
- pwm_channel_readl(ch, PWM_CPRD),
- pwm_channel_readl(ch, PWM_CCNT));
-}
-
-
-/**
- * pwm_channel_alloc - allocate an unused PWM channel
- * @index: identifies the channel
- * @ch: structure to be initialized
- *
- * Drivers allocate PWM channels according to the board's wiring, and
- * matching board-specific setup code. Returns zero or negative errno.
- */
-int pwm_channel_alloc(int index, struct pwm_channel *ch)
-{
- unsigned long flags;
- int status = 0;
-
- if (!pwm)
- return -EPROBE_DEFER;
-
- if (!(pwm->mask & 1 << index))
- return -ENODEV;
-
- if (index < 0 || index >= PWM_NCHAN || !ch)
- return -EINVAL;
- memset(ch, 0, sizeof *ch);
-
- spin_lock_irqsave(&pwm->lock, flags);
- if (pwm->channel[index])
- status = -EBUSY;
- else {
- clk_enable(pwm->clk);
-
- ch->regs = pwmc_regs(pwm, index);
- ch->index = index;
-
- /* REVISIT: ap7000 seems to go 2x as fast as we expect!! */
- ch->mck = clk_get_rate(pwm->clk);
-
- pwm->channel[index] = ch;
- pwm->handler[index] = NULL;
-
- /* channel and irq are always disabled when we return */
- pwm_writel(pwm, PWM_DIS, 1 << index);
- pwm_writel(pwm, PWM_IDR, 1 << index);
- }
- spin_unlock_irqrestore(&pwm->lock, flags);
- return status;
-}
-EXPORT_SYMBOL(pwm_channel_alloc);
-
-static int pwmcheck(struct pwm_channel *ch)
-{
- int index;
-
- if (!pwm)
- return -ENODEV;
- if (!ch)
- return -EINVAL;
- index = ch->index;
- if (index < 0 || index >= PWM_NCHAN || pwm->channel[index] != ch)
- return -EINVAL;
-
- return index;
-}
-
-/**
- * pwm_channel_free - release a previously allocated channel
- * @ch: the channel being released
- *
- * The channel is completely shut down (counter and IRQ disabled),
- * and made available for re-use. Returns zero, or negative errno.
- */
-int pwm_channel_free(struct pwm_channel *ch)
-{
- unsigned long flags;
- int t;
-
- spin_lock_irqsave(&pwm->lock, flags);
- t = pwmcheck(ch);
- if (t >= 0) {
- pwm->channel[t] = NULL;
- pwm->handler[t] = NULL;
-
- /* channel and irq are always disabled when we return */
- pwm_writel(pwm, PWM_DIS, 1 << t);
- pwm_writel(pwm, PWM_IDR, 1 << t);
-
- clk_disable(pwm->clk);
- t = 0;
- }
- spin_unlock_irqrestore(&pwm->lock, flags);
- return t;
-}
-EXPORT_SYMBOL(pwm_channel_free);
-
-int __pwm_channel_onoff(struct pwm_channel *ch, int enabled)
-{
- unsigned long flags;
- int t;
-
- /* OMITTED FUNCTIONALITY: starting several channels in synch */
-
- spin_lock_irqsave(&pwm->lock, flags);
- t = pwmcheck(ch);
- if (t >= 0) {
- pwm_writel(pwm, enabled ? PWM_ENA : PWM_DIS, 1 << t);
- t = 0;
- pwm_dumpregs(ch, enabled ? "enable" : "disable");
- }
- spin_unlock_irqrestore(&pwm->lock, flags);
-
- return t;
-}
-EXPORT_SYMBOL(__pwm_channel_onoff);
-
-/**
- * pwm_clk_alloc - allocate and configure CLKA or CLKB
- * @prescale: from 0..10, the power of two used to divide MCK
- * @div: from 1..255, the linear divisor to use
- *
- * Returns PWM_CPR_CLKA, PWM_CPR_CLKB, or negative errno. The allocated
- * clock will run with a period of (2^prescale * div) / MCK, or twice as
- * long if center aligned PWM output is used. The clock must later be
- * deconfigured using pwm_clk_free().
- */
-int pwm_clk_alloc(unsigned prescale, unsigned div)
-{
- unsigned long flags;
- u32 mr;
- u32 val = (prescale << 8) | div;
- int ret = -EBUSY;
-
- if (prescale >= 10 || div == 0 || div > 255)
- return -EINVAL;
-
- spin_lock_irqsave(&pwm->lock, flags);
- mr = pwm_readl(pwm, PWM_MR);
- if ((mr & 0xffff) == 0) {
- mr |= val;
- ret = PWM_CPR_CLKA;
- } else if ((mr & (0xffff << 16)) == 0) {
- mr |= val << 16;
- ret = PWM_CPR_CLKB;
- }
- if (ret > 0)
- pwm_writel(pwm, PWM_MR, mr);
- spin_unlock_irqrestore(&pwm->lock, flags);
- return ret;
-}
-EXPORT_SYMBOL(pwm_clk_alloc);
-
-/**
- * pwm_clk_free - deconfigure and release CLKA or CLKB
- *
- * Reverses the effect of pwm_clk_alloc().
- */
-void pwm_clk_free(unsigned clk)
-{
- unsigned long flags;
- u32 mr;
-
- spin_lock_irqsave(&pwm->lock, flags);
- mr = pwm_readl(pwm, PWM_MR);
- if (clk == PWM_CPR_CLKA)
- pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 0));
- if (clk == PWM_CPR_CLKB)
- pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 16));
- spin_unlock_irqrestore(&pwm->lock, flags);
-}
-EXPORT_SYMBOL(pwm_clk_free);
-
-/**
- * pwm_channel_handler - manage channel's IRQ handler
- * @ch: the channel
- * @handler: the handler to use, possibly NULL
- *
- * If the handler is non-null, the handler will be called after every
- * period of this PWM channel. If the handler is null, this channel
- * won't generate an IRQ.
- */
-int pwm_channel_handler(struct pwm_channel *ch,
- void (*handler)(struct pwm_channel *ch))
-{
- unsigned long flags;
- int t;
-
- spin_lock_irqsave(&pwm->lock, flags);
- t = pwmcheck(ch);
- if (t >= 0) {
- pwm->handler[t] = handler;
- pwm_writel(pwm, handler ? PWM_IER : PWM_IDR, 1 << t);
- t = 0;
- }
- spin_unlock_irqrestore(&pwm->lock, flags);
-
- return t;
-}
-EXPORT_SYMBOL(pwm_channel_handler);
-
-static irqreturn_t pwm_irq(int id, void *_pwm)
-{
- struct pwm *p = _pwm;
- irqreturn_t handled = IRQ_NONE;
- u32 irqstat;
- int index;
-
- spin_lock(&p->lock);
-
- /* ack irqs, then handle them */
- irqstat = pwm_readl(pwm, PWM_ISR);
-
- while (irqstat) {
- struct pwm_channel *ch;
- void (*handler)(struct pwm_channel *ch);
-
- index = ffs(irqstat) - 1;
- irqstat &= ~(1 << index);
- ch = pwm->channel[index];
- handler = pwm->handler[index];
- if (handler && ch) {
- spin_unlock(&p->lock);
- handler(ch);
- spin_lock(&p->lock);
- handled = IRQ_HANDLED;
- }
- }
-
- spin_unlock(&p->lock);
- return handled;
-}
-
-static int __init pwm_probe(struct platform_device *pdev)
-{
- struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int irq = platform_get_irq(pdev, 0);
- u32 *mp = pdev->dev.platform_data;
- struct pwm *p;
- int status = -EIO;
-
- if (pwm)
- return -EBUSY;
- if (!r || irq < 0 || !mp || !*mp)
- return -ENODEV;
- if (*mp & ~((1<<PWM_NCHAN)-1)) {
- dev_warn(&pdev->dev, "mask 0x%x ... more than %d channels\n",
- *mp, PWM_NCHAN);
- return -EINVAL;
- }
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
- spin_lock_init(&p->lock);
- p->pdev = pdev;
- p->mask = *mp;
- p->irq = irq;
- p->base = ioremap(r->start, resource_size(r));
- if (!p->base)
- goto fail;
- p->clk = clk_get(&pdev->dev, "pwm_clk");
- if (IS_ERR(p->clk)) {
- status = PTR_ERR(p->clk);
- p->clk = NULL;
- goto fail;
- }
-
- status = request_irq(irq, pwm_irq, 0, pdev->name, p);
- if (status < 0)
- goto fail;
-
- pwm = p;
- platform_set_drvdata(pdev, p);
-
- return 0;
-
-fail:
- if (p->clk)
- clk_put(p->clk);
- if (p->base)
- iounmap(p->base);
-
- kfree(p);
- return status;
-}
-
-static int __exit pwm_remove(struct platform_device *pdev)
-{
- struct pwm *p = platform_get_drvdata(pdev);
-
- if (p != pwm)
- return -EINVAL;
-
- clk_enable(pwm->clk);
- pwm_writel(pwm, PWM_DIS, (1 << PWM_NCHAN) - 1);
- pwm_writel(pwm, PWM_IDR, (1 << PWM_NCHAN) - 1);
- clk_disable(pwm->clk);
-
- pwm = NULL;
-
- free_irq(p->irq, p);
- clk_put(p->clk);
- iounmap(p->base);
- kfree(p);
-
- return 0;
-}
-
-static struct platform_driver atmel_pwm_driver = {
- .driver = {
- .name = "atmel_pwm",
- .owner = THIS_MODULE,
- },
- .remove = __exit_p(pwm_remove),
-
- /* NOTE: PWM can keep running in AVR32 "idle" and "frozen" states;
- * and all AT91sam9263 states, albeit at reduced clock rate if
- * MCK becomes the slow clock (i.e. what Linux labels STR).
- */
-};
-
-module_platform_driver_probe(atmel_pwm_driver, pwm_probe);
-
-MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:atmel_pwm");
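For reference, the kerneldoc of the removed pwm_clk_alloc() above states that the allocated clock runs with a period of (2^prescale * div) / MCK. The standalone sketch below is not part of the patch; the 133 MHz MCK is an assumed example value, used only to show what that formula works out to.

#include <stdio.h>

int main(void)
{
	unsigned long mck = 133000000UL;	/* assumed master clock, Hz */
	unsigned int prescale = 5;		/* power of two dividing MCK */
	unsigned int div = 200;			/* linear divisor, 1..255 */

	/* period = (2^prescale * div) / MCK, printed in nanoseconds */
	double period_ns = (double)(1UL << prescale) * div / mck * 1e9;

	printf("CLKx period: %.1f ns (about %.1f kHz)\n",
	       period_ns, 1e6 / period_ns);
	return 0;
}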
diff --git a/drivers/misc/fuse/Makefile b/drivers/misc/fuse/Makefile
new file mode 100644
index 000000000000..0679c4febc89
--- /dev/null
+++ b/drivers/misc/fuse/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index f5443a6c4915..9c9f6af29251 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -32,6 +32,7 @@
* (the low to high transition will not occur).
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -90,7 +91,7 @@ struct au1xmmc_host {
struct mmc_request *mrq;
u32 flags;
- u32 iobase;
+ void __iomem *iobase;
u32 clock;
u32 bus_width;
u32 power_mode;
@@ -118,6 +119,7 @@ struct au1xmmc_host {
struct au1xmmc_platform_data *platdata;
struct platform_device *pdev;
struct resource *ioarea;
+ struct clk *clk;
};
/* Status flags used by the host structure */
@@ -162,32 +164,33 @@ static inline int has_dbdma(void)
static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)
{
- u32 val = au_readl(HOST_CONFIG(host));
+ u32 val = __raw_readl(HOST_CONFIG(host));
val |= mask;
- au_writel(val, HOST_CONFIG(host));
- au_sync();
+ __raw_writel(val, HOST_CONFIG(host));
+ wmb(); /* drain writebuffer */
}
static inline void FLUSH_FIFO(struct au1xmmc_host *host)
{
- u32 val = au_readl(HOST_CONFIG2(host));
+ u32 val = __raw_readl(HOST_CONFIG2(host));
- au_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
- au_sync_delay(1);
+ __raw_writel(val | SD_CONFIG2_FF, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+ mdelay(1);
/* SEND_STOP will turn off clock control - this re-enables it */
val &= ~SD_CONFIG2_DF;
- au_writel(val, HOST_CONFIG2(host));
- au_sync();
+ __raw_writel(val, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
}
static inline void IRQ_OFF(struct au1xmmc_host *host, u32 mask)
{
- u32 val = au_readl(HOST_CONFIG(host));
+ u32 val = __raw_readl(HOST_CONFIG(host));
val &= ~mask;
- au_writel(val, HOST_CONFIG(host));
- au_sync();
+ __raw_writel(val, HOST_CONFIG(host));
+ wmb(); /* drain writebuffer */
}
static inline void SEND_STOP(struct au1xmmc_host *host)
@@ -197,12 +200,13 @@ static inline void SEND_STOP(struct au1xmmc_host *host)
WARN_ON(host->status != HOST_S_DATA);
host->status = HOST_S_STOP;
- config2 = au_readl(HOST_CONFIG2(host));
- au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
- au_sync();
+ config2 = __raw_readl(HOST_CONFIG2(host));
+ __raw_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
/* Send the stop command */
- au_writel(STOP_CMD, HOST_CMD(host));
+ __raw_writel(STOP_CMD, HOST_CMD(host));
+ wmb(); /* drain writebuffer */
}
static void au1xmmc_set_power(struct au1xmmc_host *host, int state)
@@ -296,28 +300,28 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
}
}
- au_writel(cmd->arg, HOST_CMDARG(host));
- au_sync();
+ __raw_writel(cmd->arg, HOST_CMDARG(host));
+ wmb(); /* drain writebuffer */
if (wait)
IRQ_OFF(host, SD_CONFIG_CR);
- au_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
- au_sync();
+ __raw_writel((mmccmd | SD_CMD_GO), HOST_CMD(host));
+ wmb(); /* drain writebuffer */
/* Wait for the command to go on the line */
- while (au_readl(HOST_CMD(host)) & SD_CMD_GO)
+ while (__raw_readl(HOST_CMD(host)) & SD_CMD_GO)
/* nop */;
/* Wait for the command to come back */
if (wait) {
- u32 status = au_readl(HOST_STATUS(host));
+ u32 status = __raw_readl(HOST_STATUS(host));
while (!(status & SD_STATUS_CR))
- status = au_readl(HOST_STATUS(host));
+ status = __raw_readl(HOST_STATUS(host));
/* Clear the CR status */
- au_writel(SD_STATUS_CR, HOST_STATUS(host));
+ __raw_writel(SD_STATUS_CR, HOST_STATUS(host));
IRQ_ON(host, SD_CONFIG_CR);
}
@@ -339,11 +343,11 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
data = mrq->cmd->data;
if (status == 0)
- status = au_readl(HOST_STATUS(host));
+ status = __raw_readl(HOST_STATUS(host));
/* The transaction is really over when the SD_STATUS_DB bit is clear */
while ((host->flags & HOST_F_XMIT) && (status & SD_STATUS_DB))
- status = au_readl(HOST_STATUS(host));
+ status = __raw_readl(HOST_STATUS(host));
data->error = 0;
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma.dir);
@@ -357,7 +361,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)
data->error = -EILSEQ;
/* Clear the CRC bits */
- au_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));
+ __raw_writel(SD_STATUS_WC | SD_STATUS_RC, HOST_STATUS(host));
data->bytes_xfered = 0;
@@ -380,7 +384,7 @@ static void au1xmmc_tasklet_data(unsigned long param)
{
struct au1xmmc_host *host = (struct au1xmmc_host *)param;
- u32 status = au_readl(HOST_STATUS(host));
+ u32 status = __raw_readl(HOST_STATUS(host));
au1xmmc_data_complete(host, status);
}
@@ -412,15 +416,15 @@ static void au1xmmc_send_pio(struct au1xmmc_host *host)
max = AU1XMMC_MAX_TRANSFER;
for (count = 0; count < max; count++) {
- status = au_readl(HOST_STATUS(host));
+ status = __raw_readl(HOST_STATUS(host));
if (!(status & SD_STATUS_TH))
break;
val = *sg_ptr++;
- au_writel((unsigned long)val, HOST_TXPORT(host));
- au_sync();
+ __raw_writel((unsigned long)val, HOST_TXPORT(host));
+ wmb(); /* drain writebuffer */
}
host->pio.len -= count;
@@ -472,7 +476,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
max = AU1XMMC_MAX_TRANSFER;
for (count = 0; count < max; count++) {
- status = au_readl(HOST_STATUS(host));
+ status = __raw_readl(HOST_STATUS(host));
if (!(status & SD_STATUS_NE))
break;
@@ -494,7 +498,7 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
break;
}
- val = au_readl(HOST_RXPORT(host));
+ val = __raw_readl(HOST_RXPORT(host));
if (sg_ptr)
*sg_ptr++ = (unsigned char)(val & 0xFF);
@@ -537,10 +541,10 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
- r[0] = au_readl(host->iobase + SD_RESP3);
- r[1] = au_readl(host->iobase + SD_RESP2);
- r[2] = au_readl(host->iobase + SD_RESP1);
- r[3] = au_readl(host->iobase + SD_RESP0);
+ r[0] = __raw_readl(host->iobase + SD_RESP3);
+ r[1] = __raw_readl(host->iobase + SD_RESP2);
+ r[2] = __raw_readl(host->iobase + SD_RESP1);
+ r[3] = __raw_readl(host->iobase + SD_RESP0);
/* The CRC is omitted from the response, so really
* we only got 120 bytes, but the engine expects
@@ -559,7 +563,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
* that means that the OSR data starts at bit 31,
* so we can just read RESP0 and return that.
*/
- cmd->resp[0] = au_readl(host->iobase + SD_RESP0);
+ cmd->resp[0] = __raw_readl(host->iobase + SD_RESP0);
}
}
@@ -586,7 +590,7 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
u32 mask = SD_STATUS_DB | SD_STATUS_NE;
while((status & mask) != mask)
- status = au_readl(HOST_STATUS(host));
+ status = __raw_readl(HOST_STATUS(host));
}
au1xxx_dbdma_start(channel);
@@ -595,24 +599,17 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)
static void au1xmmc_set_clock(struct au1xmmc_host *host, int rate)
{
- unsigned int pbus = get_au1x00_speed();
- unsigned int divisor;
+ unsigned int pbus = clk_get_rate(host->clk);
+ unsigned int divisor = ((pbus / rate) / 2) - 1;
u32 config;
- /* From databook:
- * divisor = ((((cpuclock / sbus_divisor) / 2) / mmcclock) / 2) - 1
- */
- pbus /= ((au_readl(SYS_POWERCTRL) & 0x3) + 2);
- pbus /= 2;
- divisor = ((pbus / rate) / 2) - 1;
-
- config = au_readl(HOST_CONFIG(host));
+ config = __raw_readl(HOST_CONFIG(host));
config &= ~(SD_CONFIG_DIV);
config |= (divisor & SD_CONFIG_DIV) | SD_CONFIG_DE;
- au_writel(config, HOST_CONFIG(host));
- au_sync();
+ __raw_writel(config, HOST_CONFIG(host));
+ wmb(); /* drain writebuffer */
}
static int au1xmmc_prepare_data(struct au1xmmc_host *host,
@@ -636,7 +633,7 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host,
if (host->dma.len == 0)
return -ETIMEDOUT;
- au_writel(data->blksz - 1, HOST_BLKSIZE(host));
+ __raw_writel(data->blksz - 1, HOST_BLKSIZE(host));
if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {
int i;
@@ -723,31 +720,34 @@ static void au1xmmc_request(struct mmc_host* mmc, struct mmc_request* mrq)
static void au1xmmc_reset_controller(struct au1xmmc_host *host)
{
/* Apply the clock */
- au_writel(SD_ENABLE_CE, HOST_ENABLE(host));
- au_sync_delay(1);
+ __raw_writel(SD_ENABLE_CE, HOST_ENABLE(host));
+ wmb(); /* drain writebuffer */
+ mdelay(1);
- au_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
- au_sync_delay(5);
+ __raw_writel(SD_ENABLE_R | SD_ENABLE_CE, HOST_ENABLE(host));
+ wmb(); /* drain writebuffer */
+ mdelay(5);
- au_writel(~0, HOST_STATUS(host));
- au_sync();
+ __raw_writel(~0, HOST_STATUS(host));
+ wmb(); /* drain writebuffer */
- au_writel(0, HOST_BLKSIZE(host));
- au_writel(0x001fffff, HOST_TIMEOUT(host));
- au_sync();
+ __raw_writel(0, HOST_BLKSIZE(host));
+ __raw_writel(0x001fffff, HOST_TIMEOUT(host));
+ wmb(); /* drain writebuffer */
- au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
- au_sync();
+ __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
- au_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
- au_sync_delay(1);
+ __raw_writel(SD_CONFIG2_EN | SD_CONFIG2_FF, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
+ mdelay(1);
- au_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
- au_sync();
+ __raw_writel(SD_CONFIG2_EN, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
/* Configure interrupts */
- au_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
- au_sync();
+ __raw_writel(AU1XMMC_INTERRUPTS, HOST_CONFIG(host));
+ wmb(); /* drain writebuffer */
}
@@ -767,7 +767,7 @@ static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->clock = ios->clock;
}
- config2 = au_readl(HOST_CONFIG2(host));
+ config2 = __raw_readl(HOST_CONFIG2(host));
switch (ios->bus_width) {
case MMC_BUS_WIDTH_8:
config2 |= SD_CONFIG2_BB;
@@ -780,8 +780,8 @@ static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);
break;
}
- au_writel(config2, HOST_CONFIG2(host));
- au_sync();
+ __raw_writel(config2, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
}
#define STATUS_TIMEOUT (SD_STATUS_RAT | SD_STATUS_DT)
@@ -793,7 +793,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
struct au1xmmc_host *host = dev_id;
u32 status;
- status = au_readl(HOST_STATUS(host));
+ status = __raw_readl(HOST_STATUS(host));
if (!(status & SD_STATUS_I))
return IRQ_NONE; /* not ours */
@@ -839,8 +839,8 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id)
status);
}
- au_writel(status, HOST_STATUS(host));
- au_sync();
+ __raw_writel(status, HOST_STATUS(host));
+ wmb(); /* drain writebuffer */
return IRQ_HANDLED;
}
@@ -976,7 +976,7 @@ static int au1xmmc_probe(struct platform_device *pdev)
goto out1;
}
- host->iobase = (unsigned long)ioremap(r->start, 0x3c);
+ host->iobase = ioremap(r->start, 0x3c);
if (!host->iobase) {
dev_err(&pdev->dev, "cannot remap mmio\n");
goto out2;
@@ -1025,6 +1025,16 @@ static int au1xmmc_probe(struct platform_device *pdev)
goto out3;
}
+ host->clk = clk_get(&pdev->dev, ALCHEMY_PERIPH_CLK);
+ if (IS_ERR(host->clk)) {
+ dev_err(&pdev->dev, "cannot find clock\n");
+ goto out_irq;
+ }
+ if (clk_prepare_enable(host->clk)) {
+ dev_err(&pdev->dev, "cannot enable clock\n");
+ goto out_clk;
+ }
+
host->status = HOST_S_IDLE;
/* board-specific carddetect setup, if any */
@@ -1075,7 +1085,7 @@ static int au1xmmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
- pr_info(DRIVER_NAME ": MMC Controller %d set up at %8.8X"
+ pr_info(DRIVER_NAME ": MMC Controller %d set up at %p"
" (mode=%s)\n", pdev->id, host->iobase,
host->flags & HOST_F_DMA ? "dma" : "pio");
@@ -1087,10 +1097,10 @@ out6:
led_classdev_unregister(host->platdata->led);
out5:
#endif
- au_writel(0, HOST_ENABLE(host));
- au_writel(0, HOST_CONFIG(host));
- au_writel(0, HOST_CONFIG2(host));
- au_sync();
+ __raw_writel(0, HOST_ENABLE(host));
+ __raw_writel(0, HOST_CONFIG(host));
+ __raw_writel(0, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
if (host->flags & HOST_F_DBDMA)
au1xmmc_dbdma_shutdown(host);
@@ -1101,7 +1111,10 @@ out5:
if (host->platdata && host->platdata->cd_setup &&
!(mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(mmc, 0);
-
+out_clk:
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+out_irq:
free_irq(host->irq, host);
out3:
iounmap((void *)host->iobase);
@@ -1130,10 +1143,10 @@ static int au1xmmc_remove(struct platform_device *pdev)
!(host->mmc->caps & MMC_CAP_NEEDS_POLL))
host->platdata->cd_setup(host->mmc, 0);
- au_writel(0, HOST_ENABLE(host));
- au_writel(0, HOST_CONFIG(host));
- au_writel(0, HOST_CONFIG2(host));
- au_sync();
+ __raw_writel(0, HOST_ENABLE(host));
+ __raw_writel(0, HOST_CONFIG(host));
+ __raw_writel(0, HOST_CONFIG2(host));
+ wmb(); /* drain writebuffer */
tasklet_kill(&host->data_task);
tasklet_kill(&host->finish_task);
@@ -1143,6 +1156,9 @@ static int au1xmmc_remove(struct platform_device *pdev)
au1xmmc_set_power(host, 0);
+ clk_disable_unprepare(host->clk);
+ clk_put(host->clk);
+
free_irq(host->irq, host);
iounmap((void *)host->iobase);
release_resource(host->ioarea);
@@ -1158,11 +1174,11 @@ static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)
{
struct au1xmmc_host *host = platform_get_drvdata(pdev);
- au_writel(0, HOST_CONFIG2(host));
- au_writel(0, HOST_CONFIG(host));
- au_writel(0xffffffff, HOST_STATUS(host));
- au_writel(0, HOST_ENABLE(host));
- au_sync();
+ __raw_writel(0, HOST_CONFIG2(host));
+ __raw_writel(0, HOST_CONFIG(host));
+ __raw_writel(0xffffffff, HOST_STATUS(host));
+ __raw_writel(0, HOST_ENABLE(host));
+ wmb(); /* drain writebuffer */
return 0;
}
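The clock handling converted above now takes the peripheral bus rate from clk_get_rate(host->clk) and keeps the divisor formula divisor = ((pbus / rate) / 2) - 1. A minimal standalone sketch of that calculation follows; the 96 MHz bus rate is assumed purely for illustration.

#include <stdio.h>

/* same arithmetic as au1xmmc_set_clock() after the change above */
static unsigned int sd_divisor(unsigned int pbus, unsigned int rate)
{
	return ((pbus / rate) / 2) - 1;
}

int main(void)
{
	unsigned int pbus = 96000000;	/* e.g. clk_get_rate(host->clk) */

	printf("25 MHz target -> divisor %u\n", sd_divisor(pbus, 25000000));
	printf("400 kHz target -> divisor %u\n", sd_divisor(pbus, 400000));
	return 0;
}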
diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
index 0d519649b575..dfde4a210238 100644
--- a/drivers/mmc/host/rtsx_pci_sdmmc.c
+++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
@@ -24,6 +24,7 @@
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
@@ -36,7 +37,10 @@ struct realtek_pci_sdmmc {
struct rtsx_pcr *pcr;
struct mmc_host *mmc;
struct mmc_request *mrq;
+ struct workqueue_struct *workq;
+#define SDMMC_WORKQ_NAME "rtsx_pci_sdmmc_workq"
+ struct work_struct work;
struct mutex host_mutex;
u8 ssc_depth;
@@ -48,6 +52,11 @@ struct realtek_pci_sdmmc {
int power_state;
#define SDMMC_POWER_ON 1
#define SDMMC_POWER_OFF 0
+
+ unsigned int sg_count;
+ s32 cookie;
+ unsigned int cookie_sg_count;
+ bool using_cookie;
};
static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host)
@@ -86,6 +95,77 @@ static void sd_print_debug_regs(struct realtek_pci_sdmmc *host)
#define sd_print_debug_regs(host)
#endif /* DEBUG */
+/*
+ * sd_pre_dma_transfer - perform dma_map_sg() or reuse the cookie mapping
+ *
+ * @pre: true if called from pre_req()
+ * return:
+ *	0 - dma_map_sg() was performed
+ *	1 - the cookie mapping was reused
+ */
+static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host,
+ struct mmc_data *data, bool pre)
+{
+ struct rtsx_pcr *pcr = host->pcr;
+ int read = data->flags & MMC_DATA_READ;
+ int count = 0;
+ int using_cookie = 0;
+
+ if (!pre && data->host_cookie && data->host_cookie != host->cookie) {
+ dev_err(sdmmc_dev(host),
+ "error: data->host_cookie = %d, host->cookie = %d\n",
+ data->host_cookie, host->cookie);
+ data->host_cookie = 0;
+ }
+
+ if (pre || data->host_cookie != host->cookie) {
+ count = rtsx_pci_dma_map_sg(pcr, data->sg, data->sg_len, read);
+ } else {
+ count = host->cookie_sg_count;
+ using_cookie = 1;
+ }
+
+ if (pre) {
+ host->cookie_sg_count = count;
+ if (++host->cookie < 0)
+ host->cookie = 1;
+ data->host_cookie = host->cookie;
+ } else {
+ host->sg_count = count;
+ }
+
+ return using_cookie;
+}
+
+static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (data->host_cookie) {
+ dev_err(sdmmc_dev(host),
+ "error: reset data->host_cookie = %d\n",
+ data->host_cookie);
+ data->host_cookie = 0;
+ }
+
+ sd_pre_dma_transfer(host, data, true);
+ dev_dbg(sdmmc_dev(host), "pre dma sg: %d\n", host->cookie_sg_count);
+}
+
+static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct rtsx_pcr *pcr = host->pcr;
+ struct mmc_data *data = mrq->data;
+ int read = data->flags & MMC_DATA_READ;
+
+ rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read);
+ data->host_cookie = 0;
+}
+
static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt,
u8 *buf, int buf_len, int timeout)
{
@@ -415,7 +495,7 @@ static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq)
rtsx_pci_send_cmd_no_wait(pcr);
- err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000);
+ err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read, 10000);
if (err < 0) {
sd_clear_error(host);
return err;
@@ -640,12 +720,24 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode)
return 0;
}
-static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+static inline int sd_rw_cmd(struct mmc_command *cmd)
{
- struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ return mmc_op_multi(cmd->opcode) ||
+ (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
+ (cmd->opcode == MMC_WRITE_BLOCK);
+}
+
+static void sd_request(struct work_struct *work)
+{
+ struct realtek_pci_sdmmc *host = container_of(work,
+ struct realtek_pci_sdmmc, work);
struct rtsx_pcr *pcr = host->pcr;
+
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_request *mrq = host->mrq;
struct mmc_command *cmd = mrq->cmd;
struct mmc_data *data = mrq->data;
+
unsigned int data_size = 0;
int err;
@@ -677,13 +769,13 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (mrq->data)
data_size = data->blocks * data->blksz;
- if (!data_size || mmc_op_multi(cmd->opcode) ||
- (cmd->opcode == MMC_READ_SINGLE_BLOCK) ||
- (cmd->opcode == MMC_WRITE_BLOCK)) {
+ if (!data_size || sd_rw_cmd(cmd)) {
sd_send_cmd_get_rsp(host, cmd);
if (!cmd->error && data_size) {
sd_rw_multi(host, mrq);
+ if (!host->using_cookie)
+ sdmmc_post_req(host->mmc, host->mrq, 0);
if (mmc_op_multi(cmd->opcode) && mrq->stop)
sd_send_cmd_get_rsp(host, mrq->stop);
@@ -712,6 +804,21 @@ finish:
mmc_request_done(mmc, mrq);
}
+static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ struct realtek_pci_sdmmc *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ mutex_lock(&host->host_mutex);
+ host->mrq = mrq;
+ mutex_unlock(&host->host_mutex);
+
+ if (sd_rw_cmd(mrq->cmd))
+ host->using_cookie = sd_pre_dma_transfer(host, data, false);
+
+ queue_work(host->workq, &host->work);
+}
+
static int sd_set_bus_width(struct realtek_pci_sdmmc *host,
unsigned char bus_width)
{
@@ -1146,6 +1253,8 @@ out:
}
static const struct mmc_host_ops realtek_pci_sdmmc_ops = {
+ .pre_req = sdmmc_pre_req,
+ .post_req = sdmmc_post_req,
.request = sdmmc_request,
.set_ios = sdmmc_set_ios,
.get_ro = sdmmc_get_ro,
@@ -1224,10 +1333,16 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev)
return -ENOMEM;
host = mmc_priv(mmc);
+ host->workq = create_singlethread_workqueue(SDMMC_WORKQ_NAME);
+ if (!host->workq) {
+ mmc_free_host(mmc);
+ return -ENOMEM;
+ }
host->pcr = pcr;
host->mmc = mmc;
host->pdev = pdev;
host->power_state = SDMMC_POWER_OFF;
+ INIT_WORK(&host->work, sd_request);
platform_set_drvdata(pdev, host);
pcr->slots[RTSX_SD_CARD].p_dev = pdev;
pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event;
@@ -1255,6 +1370,8 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
pcr->slots[RTSX_SD_CARD].card_event = NULL;
mmc = host->mmc;
+ cancel_work_sync(&host->work);
+
mutex_lock(&host->host_mutex);
if (host->mrq) {
dev_dbg(&(pdev->dev),
@@ -1273,6 +1390,10 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev)
mmc_remove_host(mmc);
host->eject = true;
+ flush_workqueue(host->workq);
+ destroy_workqueue(host->workq);
+ host->workq = NULL;
+
mmc_free_host(mmc);
dev_dbg(&(pdev->dev),
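The pre_req()/post_req() pair added above maps the scatterlist ahead of the actual request and stamps it with a cookie, so the request path can reuse the mapping instead of mapping again. The sketch below models only that cookie decision; fake_map_sg() is a stand-in for the real DMA mapping, not a kernel API.

#include <stdio.h>
#include <stdbool.h>

struct fake_host { int cookie; int cookie_sg_count; int sg_count; };
struct fake_data { int host_cookie; int sg_len; };

static int fake_map_sg(struct fake_data *d) { return d->sg_len; } /* stub */

/* returns 1 when a previously prepared mapping (cookie) is reused */
static int pre_dma_transfer(struct fake_host *h, struct fake_data *d, bool pre)
{
	int count, using_cookie = 0;

	if (pre || d->host_cookie != h->cookie) {
		count = fake_map_sg(d);		/* fresh mapping */
	} else {
		count = h->cookie_sg_count;	/* reuse pre_req() mapping */
		using_cookie = 1;
	}

	if (pre) {
		h->cookie_sg_count = count;
		if (++h->cookie < 0)
			h->cookie = 1;
		d->host_cookie = h->cookie;
	} else {
		h->sg_count = count;
	}
	return using_cookie;
}

int main(void)
{
	struct fake_host h = { .cookie = 1 };
	struct fake_data d = { .sg_len = 8 };

	pre_dma_transfer(&h, &d, true);			/* pre_req() path */
	printf("request reuses cookie: %d\n",
	       pre_dma_transfer(&h, &d, false));	/* prints 1 */
	return 0;
}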
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index e21fde9d4d7e..5a4bfe33112a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -58,7 +58,18 @@ static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
+static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
+static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
+ size_t *, struct otp_info *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, u_char *);
+static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, u_char *);
+static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
+ size_t *, u_char *);
+static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf);
@@ -518,6 +529,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
mtd->_sync = cfi_amdstd_sync;
mtd->_suspend = cfi_amdstd_suspend;
mtd->_resume = cfi_amdstd_resume;
+ mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
+ mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
+ mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
+ mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
+ mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
+ mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
mtd->writesize = 1;
@@ -628,6 +645,23 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
+ /*
+		 * First calculate the maximum timeout from the timeout fields
+		 * of struct cfi_ident probed from the chip's CFI area, if
+ * available. Specify a minimum of 2000us, in case the CFI data
+ * is wrong.
+ */
+ if (cfi->cfiq->BufWriteTimeoutTyp &&
+ cfi->cfiq->BufWriteTimeoutMax)
+ cfi->chips[i].buffer_write_time_max =
+ 1 << (cfi->cfiq->BufWriteTimeoutTyp +
+ cfi->cfiq->BufWriteTimeoutMax);
+ else
+ cfi->chips[i].buffer_write_time_max = 0;
+
+ cfi->chips[i].buffer_write_time_max =
+ max(cfi->chips[i].buffer_write_time_max, 2000);
+
cfi->chips[i].ref_point_counter = 0;
init_waitqueue_head(&(cfi->chips[i].wq));
}
@@ -1137,12 +1171,48 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_
return ret;
}
+typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
+ loff_t adr, size_t len, u_char *buf, size_t grouplen);
+
+static inline void otp_enter(struct map_info *map, struct flchip *chip,
+ loff_t adr, size_t len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
+}
+
+static inline void otp_exit(struct map_info *map, struct flchip *chip,
+ loff_t adr, size_t len)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
+}
-static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
+static inline int do_read_secsi_onechip(struct map_info *map,
+ struct flchip *chip, loff_t adr,
+ size_t len, u_char *buf,
+ size_t grouplen)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long timeo = jiffies + HZ;
- struct cfi_private *cfi = map->fldrv_priv;
retry:
mutex_lock(&chip->mutex);
@@ -1164,16 +1234,9 @@ static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chi
chip->state = FL_READY;
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
-
+ otp_enter(map, chip, adr, len);
map_copy_from(map, buf, adr, len);
-
- cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
- cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
+ otp_exit(map, chip, adr, len);
wake_up(&chip->wq);
mutex_unlock(&chip->mutex);
@@ -1205,7 +1268,8 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
else
thislen = len;
- ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
+ ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
+ thislen, buf, 0);
if (ret)
break;
@@ -1219,8 +1283,267 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len,
return ret;
}
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum,
+ int mode);
+
+static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
+ size_t len, u_char *buf, size_t grouplen)
+{
+ int ret;
+ while (len) {
+ unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
+ int gap = adr - bus_ofs;
+ int n = min_t(int, len, map_bankwidth(map) - gap);
+ map_word datum;
+
+ if (n != map_bankwidth(map)) {
+ /* partial write of a word, load old contents */
+ otp_enter(map, chip, bus_ofs, map_bankwidth(map));
+ datum = map_read(map, bus_ofs);
+ otp_exit(map, chip, bus_ofs, map_bankwidth(map));
+ }
+
+ datum = map_word_load_partial(map, datum, buf, gap, n);
+ ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
+ if (ret)
+ return ret;
+
+ adr += n;
+ buf += n;
+ len -= n;
+ }
+
+ return 0;
+}
+
+static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
+ size_t len, u_char *buf, size_t grouplen)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ uint8_t lockreg;
+ unsigned long timeo;
+ int ret;
+
+ /* make sure area matches group boundaries */
+ if ((adr != 0) || (len != grouplen))
+ return -EINVAL;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, chip->start, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ chip->state = FL_LOCKING;
+
+ /* Enter lock register command */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ /* read lock register */
+ lockreg = cfi_read_query(map, 0);
+
+	/* clear bit 0 to protect the extended memory block */
+	lockreg &= ~0x01;
+
+	/* write lock register */
+ map_write(map, CMD(0xA0), chip->start);
+ map_write(map, CMD(lockreg), chip->start);
+
+ /* wait for chip to become ready */
+ timeo = jiffies + msecs_to_jiffies(2);
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ pr_err("Waiting for chip to be ready timed out.\n");
+ ret = -EIO;
+ break;
+ }
+ UDELAY(map, chip, 0, 1);
+ }
+
+ /* exit protection commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf,
+ otp_op_t action, int user_regs)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ int ofs_factor = cfi->interleave * cfi->device_type;
+ unsigned long base;
+ int chipnum;
+ struct flchip *chip;
+ uint8_t otp, lockreg;
+ int ret;
+
+ size_t user_size, factory_size, otpsize;
+ loff_t user_offset, factory_offset, otpoffset;
+ int user_locked = 0, otplocked;
+
+ *retlen = 0;
+
+ for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
+ chip = &cfi->chips[chipnum];
+ factory_size = 0;
+ user_size = 0;
-static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
+ /* Micron M29EW family */
+ if (is_m29ew(cfi)) {
+ base = chip->start;
+
+ /* check whether secsi area is factory locked
+ or user lockable */
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, base, FL_CFI_QUERY);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+ cfi_qry_mode_on(base, map, cfi);
+ otp = cfi_read_query(map, base + 0x3 * ofs_factor);
+ cfi_qry_mode_off(base, map, cfi);
+ put_chip(map, chip, base);
+ mutex_unlock(&chip->mutex);
+
+ if (otp & 0x80) {
+ /* factory locked */
+ factory_offset = 0;
+ factory_size = 0x100;
+ } else {
+ /* customer lockable */
+ user_offset = 0;
+ user_size = 0x100;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, base, FL_LOCKING);
+
+ /* Enter lock register command */
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
+ chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
+ chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
+ chip->start, map, cfi,
+ cfi->device_type, NULL);
+ /* read lock register */
+ lockreg = cfi_read_query(map, 0);
+ /* exit protection commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+ put_chip(map, chip, chip->start);
+ mutex_unlock(&chip->mutex);
+
+ user_locked = ((lockreg & 0x01) == 0x00);
+ }
+ }
+
+ otpsize = user_regs ? user_size : factory_size;
+ if (!otpsize)
+ continue;
+ otpoffset = user_regs ? user_offset : factory_offset;
+ otplocked = user_regs ? user_locked : 1;
+
+ if (!action) {
+ /* return otpinfo */
+ struct otp_info *otpinfo;
+ len -= sizeof(*otpinfo);
+ if (len <= 0)
+ return -ENOSPC;
+ otpinfo = (struct otp_info *)buf;
+ otpinfo->start = from;
+ otpinfo->length = otpsize;
+ otpinfo->locked = otplocked;
+ buf += sizeof(*otpinfo);
+ *retlen += sizeof(*otpinfo);
+ from += otpsize;
+ } else if ((from < otpsize) && (len > 0)) {
+ size_t size;
+ size = (len < otpsize - from) ? len : otpsize - from;
+ ret = action(map, chip, otpoffset + from, size, buf,
+ otpsize);
+ if (ret < 0)
+ return ret;
+
+ buf += size;
+ len -= size;
+ *retlen += size;
+ from = 0;
+ } else {
+ from -= otpsize;
+ }
+ }
+ return 0;
+}
+
+static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 0);
+}
+
+static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
+ size_t *retlen, struct otp_info *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
+ NULL, 1);
+}
+
+static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, from, len, retlen,
+ buf, do_read_secsi_onechip, 0);
+}
+
+static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, from, len, retlen,
+ buf, do_read_secsi_onechip, 1);
+}
+
+static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len, size_t *retlen,
+ u_char *buf)
+{
+ return cfi_amdstd_otp_walk(mtd, from, len, retlen, buf,
+ do_otp_write, 1);
+}
+
+static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
+ size_t len)
+{
+ size_t retlen;
+ return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
+ do_otp_lock, 1);
+}
+
+static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
+ unsigned long adr, map_word datum,
+ int mode)
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
@@ -1241,7 +1564,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
adr += chip->start;
mutex_lock(&chip->mutex);
- ret = get_chip(map, chip, adr, FL_WRITING);
+ ret = get_chip(map, chip, adr, mode);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
@@ -1250,6 +1573,9 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
__func__, adr, datum.x[0] );
+ if (mode == FL_OTP_WRITE)
+ otp_enter(map, chip, adr, map_bankwidth(map));
+
/*
* Check for a NOP for the case when the datum to write is already
* present - it saves time and works around buggy chips that corrupt
@@ -1266,12 +1592,13 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
ENABLE_VPP(map);
xip_disable(map, chip, adr);
+
retry:
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
map_write(map, datum, adr);
- chip->state = FL_WRITING;
+ chip->state = mode;
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map_bankwidth(map),
@@ -1280,7 +1607,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
/* See comment above for timeout value. */
timeo = jiffies + uWriteTimeout;
for (;;) {
- if (chip->state != FL_WRITING) {
+ if (chip->state != mode) {
/* Someone's suspended the write. Sleep */
DECLARE_WAITQUEUE(wait, current);
@@ -1320,6 +1647,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
}
xip_enable(map, chip, adr);
op_done:
+ if (mode == FL_OTP_WRITE)
+ otp_exit(map, chip, adr, map_bankwidth(map));
chip->state = FL_READY;
DISABLE_VPP(map);
put_chip(map, chip, adr);
@@ -1375,7 +1704,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
ret = do_write_oneword(map, &cfi->chips[chipnum],
- bus_ofs, tmp_buf);
+ bus_ofs, tmp_buf, FL_WRITING);
if (ret)
return ret;
@@ -1399,7 +1728,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
datum = map_word_load(map, buf);
ret = do_write_oneword(map, &cfi->chips[chipnum],
- ofs, datum);
+ ofs, datum, FL_WRITING);
if (ret)
return ret;
@@ -1442,7 +1771,7 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
ret = do_write_oneword(map, &cfi->chips[chipnum],
- ofs, tmp_buf);
+ ofs, tmp_buf, FL_WRITING);
if (ret)
return ret;
@@ -1462,8 +1791,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
{
struct cfi_private *cfi = map->fldrv_priv;
unsigned long timeo = jiffies + HZ;
- /* see comments in do_write_oneword() regarding uWriteTimeo. */
- unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
+ /*
+ * Timeout is calculated according to CFI data, if available.
+ * See more comments in cfi_cmdset_0002().
+ */
+ unsigned long uWriteTimeout =
+ usecs_to_jiffies(chip->buffer_write_time_max);
int ret = -EIO;
unsigned long cmd_adr;
int z, words;
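The buffered-write timeout ceiling introduced above is 1 << (BufWriteTimeoutTyp + BufWriteTimeoutMax) microseconds, clamped to at least 2000us when the CFI data is missing or implausible. A standalone sketch with example exponent values (not taken from a real chip):

#include <stdio.h>

static unsigned int buf_write_timeout_max_us(unsigned int typ, unsigned int max)
{
	unsigned int us = (typ && max) ? 1u << (typ + max) : 0;

	return us > 2000 ? us : 2000;	/* enforce the 2000us floor */
}

int main(void)
{
	/* e.g. typical 2^9 us with a maximum multiplier of 2^3 */
	printf("timeout ceiling: %u us\n", buf_write_timeout_max_us(9, 3));
	/* missing CFI data falls back to the 2000us minimum */
	printf("fallback: %u us\n", buf_write_timeout_max_us(0, 0));
	return 0;
}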
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 3e829b37af8d..c8503006f17a 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -26,7 +26,7 @@
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
* size is automatically truncated at end of device
- * if specified or trucated size is 0 the part is skipped
+ * if specified or truncated size is 0 the part is skipped
* <offset> := standard linux memsize
* if omitted the part will immediately follow the previous part
* or 0 if the first part
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 2cceebfb251e..effd9a4ef7ee 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -181,12 +181,10 @@ static int parse_name(char **pname, const char *token)
if (len > 64)
return -ENOSPC;
- name = kmalloc(len, GFP_KERNEL);
+ name = kstrdup(token, GFP_KERNEL);
if (!name)
return -ENOMEM;
- strcpy(name, token);
-
*pname = name;
return 0;
}
@@ -195,6 +193,7 @@ static int parse_name(char **pname, const char *token)
static inline void kill_final_newline(char *str)
{
char *newline = strrchr(str, '\n');
+
if (newline && !newline[1])
*newline = 0;
}
@@ -233,7 +232,7 @@ static int phram_setup(const char *val)
strcpy(str, val);
kill_final_newline(str);
- for (i=0; i<3; i++)
+ for (i = 0; i < 3; i++)
token[i] = strsep(&str, ",");
if (str)
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 19d637266fcd..dabf08450d0b 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -111,7 +111,6 @@ typedef struct partition_t {
struct mtd_blktrans_dev mbd;
uint32_t state;
uint32_t *VirtualBlockMap;
- uint32_t *VirtualPageMap;
uint32_t FreeTotal;
struct eun_info_t {
uint32_t Offset;
@@ -1035,8 +1034,6 @@ static void ftl_freepart(partition_t *part)
{
vfree(part->VirtualBlockMap);
part->VirtualBlockMap = NULL;
- kfree(part->VirtualPageMap);
- part->VirtualPageMap = NULL;
kfree(part->EUNInfo);
part->EUNInfo = NULL;
kfree(part->XferInfo);
@@ -1075,7 +1072,6 @@ static void ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
return;
}
- ftl_freepart(partition);
kfree(partition);
}
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 146b6047ed2b..a84fdfb10518 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -35,8 +35,6 @@ static int rbtx4939_flash_remove(struct platform_device *dev)
return 0;
if (info->mtd) {
- struct rbtx4939_flash_data *pdata = dev_get_platdata(&dev->dev);
-
mtd_device_unregister(info->mtd);
map_destroy(info->mtd);
}
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index d201feeb3ca6..e4831b4159db 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -298,6 +298,47 @@ static ssize_t mtd_ecc_step_size_show(struct device *dev,
}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
+static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
+}
+static DEVICE_ATTR(corrected_bits, S_IRUGO,
+ mtd_ecc_stats_corrected_show, NULL);
+
+static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
+}
+static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
+
+static ssize_t mtd_badblocks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
+}
+static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
+
+static ssize_t mtd_bbtblocks_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
+}
+static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
+
static struct attribute *mtd_attrs[] = {
&dev_attr_type.attr,
&dev_attr_flags.attr,
@@ -310,6 +351,10 @@ static struct attribute *mtd_attrs[] = {
&dev_attr_name.attr,
&dev_attr_ecc_strength.attr,
&dev_attr_ecc_step_size.attr,
+ &dev_attr_corrected_bits.attr,
+ &dev_attr_ecc_failures.attr,
+ &dev_attr_bad_blocks.attr,
+ &dev_attr_bbt_blocks.attr,
&dev_attr_bitflip_threshold.attr,
NULL,
};
@@ -998,12 +1043,22 @@ int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
}
EXPORT_SYMBOL_GPL(mtd_is_locked);
-int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
- if (!mtd->_block_isbad)
+ if (ofs < 0 || ofs > mtd->size)
+ return -EINVAL;
+ if (!mtd->_block_isreserved)
return 0;
+ return mtd->_block_isreserved(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isreserved);
+
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
if (ofs < 0 || ofs > mtd->size)
return -EINVAL;
+ if (!mtd->_block_isbad)
+ return 0;
return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);
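The four sysfs attributes added above expose the per-device ECC statistics. A small userspace reader, assuming the usual /sys/class/mtd/mtd0 location for the device of interest:

#include <stdio.h>

int main(void)
{
	static const char *attrs[] = {
		"corrected_bits", "ecc_failures", "bad_blocks", "bbt_blocks",
	};
	char path[128], val[32];

	for (unsigned i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/class/mtd/mtd0/%s", attrs[i]);
		FILE *f = fopen(path, "r");
		if (!f)
			continue;	/* attribute missing or no such device */
		if (fgets(val, sizeof(val), f))
			printf("%s: %s", attrs[i], val);
		fclose(f);
	}
	return 0;
}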
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 1ca9aec141ff..a3e3a7d074d5 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -290,6 +290,13 @@ static void part_resume(struct mtd_info *mtd)
part->master->_resume(part->master);
}
+static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ struct mtd_part *part = PART(mtd);
+ ofs += part->offset;
+ return part->master->_block_isreserved(part->master, ofs);
+}
+
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
struct mtd_part *part = PART(mtd);
@@ -422,6 +429,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd._unlock = part_unlock;
if (master->_is_locked)
slave->mtd._is_locked = part_is_locked;
+ if (master->_block_isreserved)
+ slave->mtd._block_isreserved = part_block_isreserved;
if (master->_block_isbad)
slave->mtd._block_isbad = part_block_isbad;
if (master->_block_markbad)
@@ -526,7 +535,9 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
uint64_t offs = 0;
while (offs < slave->mtd.size) {
- if (mtd_block_isbad(master, offs + slave->offset))
+ if (mtd_block_isreserved(master, offs + slave->offset))
+ slave->mtd.ecc_stats.bbtblocks++;
+ else if (mtd_block_isbad(master, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
}
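The partition scan above now counts reserved blocks (typically bad block table blocks) separately from genuinely bad blocks. A standalone model of that loop follows; both predicates are stubs chosen only for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool block_isreserved(uint64_t ofs) { return ofs == 0x20000; } /* stub */
static bool block_isbad(uint64_t ofs)      { return ofs == 0x60000; } /* stub */

int main(void)
{
	uint64_t size = 0x100000, erasesize = 0x20000, offs;
	unsigned bbtblocks = 0, badblocks = 0;

	for (offs = 0; offs < size; offs += erasesize) {
		if (block_isreserved(offs))
			bbtblocks++;		/* e.g. bad block table block */
		else if (block_isbad(offs))
			badblocks++;
	}
	printf("bbt blocks: %u, bad blocks: %u\n", bbtblocks, badblocks);
	return 0;
}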
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 542b5689eb63..a035e7cc6d46 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -50,4 +50,4 @@ obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
-nand-objs := nand_base.o nand_bbt.o
+nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 4ce181a35bcd..e321c564ff05 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -97,7 +97,9 @@ struct atmel_nfc {
bool write_by_sram;
bool is_initialized;
- struct completion comp_nfc;
+ struct completion comp_ready;
+ struct completion comp_cmd_done;
+ struct completion comp_xfer_done;
	/* Points to the SRAM bank which contains the data read via the NFC */
void __iomem *data_in_sram;
@@ -861,12 +863,11 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
{
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
- int i, err_nbr, eccbytes;
+ int i, err_nbr;
uint8_t *buf_pos;
int total_err = 0;
- eccbytes = nand_chip->ecc.bytes;
- for (i = 0; i < eccbytes; i++)
+ for (i = 0; i < nand_chip->ecc.total; i++)
if (ecc[i] != 0xff)
goto normal_check;
/* Erased page, return OK */
@@ -928,7 +929,7 @@ static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
struct atmel_nand_host *host = chip->priv;
- int eccsize = chip->ecc.size;
+ int eccsize = chip->ecc.size * chip->ecc.steps;
uint8_t *oob = chip->oob_poi;
uint32_t *eccpos = chip->ecc.layout->eccpos;
uint32_t stat;
@@ -1169,8 +1170,7 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
goto err;
}
- /* ECC is calculated for the whole page (1 step) */
- nand_chip->ecc.size = mtd->writesize;
+ nand_chip->ecc.size = sector_size;
/* set ECC page size and oob layout */
switch (mtd->writesize) {
@@ -1185,18 +1185,20 @@ static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
host->pmecc_index_of = host->pmecc_rom_base +
host->pmecc_lookup_table_offset;
- nand_chip->ecc.steps = 1;
+ nand_chip->ecc.steps = host->pmecc_sector_number;
nand_chip->ecc.strength = cap;
- nand_chip->ecc.bytes = host->pmecc_bytes_per_sector *
+ nand_chip->ecc.bytes = host->pmecc_bytes_per_sector;
+ nand_chip->ecc.total = host->pmecc_bytes_per_sector *
host->pmecc_sector_number;
- if (nand_chip->ecc.bytes > mtd->oobsize - 2) {
+ if (nand_chip->ecc.total > mtd->oobsize - 2) {
dev_err(host->dev, "No room for ECC bytes\n");
err_no = -EINVAL;
goto err;
}
pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
mtd->oobsize,
- nand_chip->ecc.bytes);
+ nand_chip->ecc.total);
+
nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
break;
case 512:
@@ -1572,49 +1574,104 @@ static int atmel_hw_nand_init_params(struct platform_device *pdev,
return 0;
}
+static inline u32 nfc_read_status(struct atmel_nand_host *host)
+{
+ u32 err_flags = NFC_SR_DTOE | NFC_SR_UNDEF | NFC_SR_AWB | NFC_SR_ASE;
+ u32 nfc_status = nfc_readl(host->nfc->hsmc_regs, SR);
+
+ if (unlikely(nfc_status & err_flags)) {
+ if (nfc_status & NFC_SR_DTOE)
+ dev_err(host->dev, "NFC: Waiting Nand R/B Timeout Error\n");
+ else if (nfc_status & NFC_SR_UNDEF)
+ dev_err(host->dev, "NFC: Access Undefined Area Error\n");
+ else if (nfc_status & NFC_SR_AWB)
+ dev_err(host->dev, "NFC: Access memory While NFC is busy\n");
+ else if (nfc_status & NFC_SR_ASE)
+ dev_err(host->dev, "NFC: Access memory Size Error\n");
+ }
+
+ return nfc_status;
+}
+
/* SMC interrupt service routine */
static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
{
struct atmel_nand_host *host = dev_id;
u32 status, mask, pending;
- irqreturn_t ret = IRQ_HANDLED;
+ irqreturn_t ret = IRQ_NONE;
- status = nfc_readl(host->nfc->hsmc_regs, SR);
+ status = nfc_read_status(host);
mask = nfc_readl(host->nfc->hsmc_regs, IMR);
pending = status & mask;
if (pending & NFC_SR_XFR_DONE) {
- complete(&host->nfc->comp_nfc);
+ complete(&host->nfc->comp_xfer_done);
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
- } else if (pending & NFC_SR_RB_EDGE) {
- complete(&host->nfc->comp_nfc);
+ ret = IRQ_HANDLED;
+ }
+ if (pending & NFC_SR_RB_EDGE) {
+ complete(&host->nfc->comp_ready);
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
- } else if (pending & NFC_SR_CMD_DONE) {
- complete(&host->nfc->comp_nfc);
+ ret = IRQ_HANDLED;
+ }
+ if (pending & NFC_SR_CMD_DONE) {
+ complete(&host->nfc->comp_cmd_done);
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE);
- } else {
- ret = IRQ_NONE;
+ ret = IRQ_HANDLED;
}
return ret;
}
/* NFC(Nand Flash Controller) related functions */
-static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
+static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
{
- unsigned long timeout;
- init_completion(&host->nfc->comp_nfc);
+ if (flag & NFC_SR_XFR_DONE)
+ init_completion(&host->nfc->comp_xfer_done);
+
+ if (flag & NFC_SR_RB_EDGE)
+ init_completion(&host->nfc->comp_ready);
+
+ if (flag & NFC_SR_CMD_DONE)
+ init_completion(&host->nfc->comp_cmd_done);
/* Enable interrupt that need to wait for */
nfc_writel(host->nfc->hsmc_regs, IER, flag);
+}
- timeout = wait_for_completion_timeout(&host->nfc->comp_nfc,
- msecs_to_jiffies(NFC_TIME_OUT_MS));
- if (timeout)
- return 0;
+static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
+{
+ int i, index = 0;
+ struct completion *comp[3]; /* Support 3 interrupt completion */
- /* Time out to wait for the interrupt */
+ if (flag & NFC_SR_XFR_DONE)
+ comp[index++] = &host->nfc->comp_xfer_done;
+
+ if (flag & NFC_SR_RB_EDGE)
+ comp[index++] = &host->nfc->comp_ready;
+
+ if (flag & NFC_SR_CMD_DONE)
+ comp[index++] = &host->nfc->comp_cmd_done;
+
+ if (index == 0) {
+		dev_err(host->dev, "Unknown interrupt flag: 0x%08x\n", flag);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < index; i++) {
+ if (wait_for_completion_timeout(comp[i],
+ msecs_to_jiffies(NFC_TIME_OUT_MS)))
+ continue; /* wait for next completion */
+ else
+ goto err_timeout;
+ }
+
+ return 0;
+
+err_timeout:
dev_err(host->dev, "Time out to wait for interrupt: 0x%08x\n", flag);
+	/* Disable the interrupt since it was not handled by the interrupt handler */
+ nfc_writel(host->nfc->hsmc_regs, IDR, flag);
return -ETIMEDOUT;
}
@@ -1622,6 +1679,9 @@ static int nfc_send_command(struct atmel_nand_host *host,
unsigned int cmd, unsigned int addr, unsigned char cycle0)
{
unsigned long timeout;
+ u32 flag = NFC_SR_CMD_DONE;
+ flag |= cmd & NFCADDR_CMD_DATAEN ? NFC_SR_XFR_DONE : 0;
+
dev_dbg(host->dev,
"nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n",
cmd, addr, cycle0);
@@ -1635,18 +1695,28 @@ static int nfc_send_command(struct atmel_nand_host *host,
return -ETIMEDOUT;
}
}
+
+ nfc_prepare_interrupt(host, flag);
nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0);
nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs);
- return nfc_wait_interrupt(host, NFC_SR_CMD_DONE);
+ return nfc_wait_interrupt(host, flag);
}
static int nfc_device_ready(struct mtd_info *mtd)
{
+ u32 status, mask;
struct nand_chip *nand_chip = mtd->priv;
struct atmel_nand_host *host = nand_chip->priv;
- if (!nfc_wait_interrupt(host, NFC_SR_RB_EDGE))
- return 1;
- return 0;
+
+ status = nfc_read_status(host);
+ mask = nfc_readl(host->nfc->hsmc_regs, IMR);
+
+	/* The mask should be 0. If not, we may lose interrupts */
+ if (unlikely(mask & status))
+ dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
+ mask & status);
+
+ return status & NFC_SR_RB_EDGE;
}
static void nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -1795,10 +1865,6 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
- if (dataen == NFCADDR_CMD_DATAEN)
- if (nfc_wait_interrupt(host, NFC_SR_XFR_DONE))
- dev_err(host->dev, "something wrong, No XFR_DONE interrupt comes.\n");
-
/*
* Program and erase have their own busy handlers status, sequential
* in, and deplete1 need no delay.
@@ -1823,6 +1889,7 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
}
/* fall through */
default:
+ nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
}
}
@@ -2209,6 +2276,9 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
}
}
+ nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
+ nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
+
nfc->is_initialized = true;
dev_info(&pdev->dev, "NFC is probed.\n");
return 0;
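
A minimal sketch of the prepare-then-wait convention this hunk introduces. The helper name nfc_do_ready_wait() is hypothetical; nfc_prepare_interrupt(), nfc_wait_interrupt() and NFC_SR_RB_EDGE come from the patch above.

static int nfc_do_ready_wait(struct atmel_nand_host *host)
{
	/* re-init comp_ready and unmask NFC_SR_RB_EDGE in the IER */
	nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);

	/* ... issue the NAND operation that will toggle Ready/Busy ... */

	/* sleep until the IRQ handler completes comp_ready, or time out */
	return nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
}
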
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 4efd117cd3a3..85b8ca6af7d2 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -37,6 +37,10 @@
#define ATMEL_HSMC_NFC_SR 0x08 /* NFC Status Register */
#define NFC_SR_XFR_DONE (1 << 16)
#define NFC_SR_CMD_DONE (1 << 17)
+#define NFC_SR_DTOE (1 << 20)
+#define NFC_SR_UNDEF (1 << 21)
+#define NFC_SR_AWB (1 << 22)
+#define NFC_SR_ASE (1 << 23)
#define NFC_SR_RB_EDGE (1 << 24)
#define ATMEL_HSMC_NFC_IER 0x0c
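
The four bits defined above are error conditions. A hedged sketch of how a driver could surface them; neither NFC_SR_ERRORS nor nfc_check_errors() is part of this patch.

#define NFC_SR_ERRORS	(NFC_SR_DTOE | NFC_SR_UNDEF | NFC_SR_AWB | NFC_SR_ASE)

static void nfc_check_errors(struct atmel_nand_host *host, u32 status)
{
	/* warn if any of the newly defined error bits is set in the status word */
	if (status & NFC_SR_ERRORS)
		dev_warn(host->dev, "NFC error bits set: 0x%08x\n",
			 status & NFC_SR_ERRORS);
}
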
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index bc5c518828d2..77d6c17b38c2 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -41,7 +41,7 @@ static u_char au_read_byte(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
u_char ret = readb(this->IO_ADDR_R);
- au_sync();
+ wmb(); /* drain writebuffer */
return ret;
}
@@ -56,7 +56,7 @@ static void au_write_byte(struct mtd_info *mtd, u_char byte)
{
struct nand_chip *this = mtd->priv;
writeb(byte, this->IO_ADDR_W);
- au_sync();
+ wmb(); /* drain writebuffer */
}
/**
@@ -69,7 +69,7 @@ static u_char au_read_byte16(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
- au_sync();
+ wmb(); /* drain writebuffer */
return ret;
}
@@ -84,7 +84,7 @@ static void au_write_byte16(struct mtd_info *mtd, u_char byte)
{
struct nand_chip *this = mtd->priv;
writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
- au_sync();
+ wmb(); /* drain writebuffer */
}
/**
@@ -97,7 +97,7 @@ static u16 au_read_word(struct mtd_info *mtd)
{
struct nand_chip *this = mtd->priv;
u16 ret = readw(this->IO_ADDR_R);
- au_sync();
+ wmb(); /* drain writebuffer */
return ret;
}
@@ -116,7 +116,7 @@ static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
for (i = 0; i < len; i++) {
writeb(buf[i], this->IO_ADDR_W);
- au_sync();
+ wmb(); /* drain writebuffer */
}
}
@@ -135,7 +135,7 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
for (i = 0; i < len; i++) {
buf[i] = readb(this->IO_ADDR_R);
- au_sync();
+ wmb(); /* drain writebuffer */
}
}
@@ -156,7 +156,7 @@ static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
for (i = 0; i < len; i++) {
writew(p[i], this->IO_ADDR_W);
- au_sync();
+ wmb(); /* drain writebuffer */
}
}
@@ -178,7 +178,7 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
for (i = 0; i < len; i++) {
p[i] = readw(this->IO_ADDR_R);
- au_sync();
+ wmb(); /* drain writebuffer */
}
}
@@ -223,26 +223,23 @@ static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
case NAND_CTL_SETNCE:
/* assert (force assert) chip enable */
- au_writel((1 << (4 + ctx->cs)), MEM_STNDCTL);
+ alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
break;
case NAND_CTL_CLRNCE:
/* deassert chip enable */
- au_writel(0, MEM_STNDCTL);
+ alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
break;
}
this->IO_ADDR_R = this->IO_ADDR_W;
- /* Drain the writebuffer */
- au_sync();
+ wmb(); /* Drain the writebuffer */
}
int au1550_device_ready(struct mtd_info *mtd)
{
- int ret = (au_readl(MEM_STSTAT) & 0x1) ? 1 : 0;
- au_sync();
- return ret;
+ return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0;
}
/**
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 722898aea7a6..871c4f712654 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -830,34 +830,10 @@ out_err:
return err;
}
-/* PM Support */
-#ifdef CONFIG_PM
-
-static int bf5xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
-{
- struct bf5xx_nand_info *info = platform_get_drvdata(dev);
-
- return 0;
-}
-
-static int bf5xx_nand_resume(struct platform_device *dev)
-{
- struct bf5xx_nand_info *info = platform_get_drvdata(dev);
-
- return 0;
-}
-
-#else
-#define bf5xx_nand_suspend NULL
-#define bf5xx_nand_resume NULL
-#endif
-
/* driver device registration */
static struct platform_driver bf5xx_nand_driver = {
.probe = bf5xx_nand_probe,
.remove = bf5xx_nand_remove,
- .suspend = bf5xx_nand_suspend,
- .resume = bf5xx_nand_resume,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index 9f2012a3e764..0b071a3136a2 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -473,7 +473,7 @@ static void detect_partition_feature(struct denali_nand_info *denali)
static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
{
uint16_t status = PASS;
- uint32_t id_bytes[5], addr;
+ uint32_t id_bytes[8], addr;
uint8_t i, maf_id, device_id;
dev_dbg(denali->dev,
@@ -488,7 +488,7 @@ static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
index_addr(denali, (uint32_t)addr | 0, 0x90);
index_addr(denali, (uint32_t)addr | 1, 0);
- for (i = 0; i < 5; i++)
+ for (i = 0; i < 8; i++)
index_addr_read_data(denali, addr | 2, &id_bytes[i]);
maf_id = id_bytes[0];
device_id = id_bytes[1];
@@ -1276,7 +1276,7 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
index_addr(denali, (uint32_t)addr | 0, 0x90);
index_addr(denali, (uint32_t)addr | 1, 0);
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < 8; i++) {
index_addr_read_data(denali,
(uint32_t)addr | 2,
&id);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index f638cd8077ca..959cb9b70310 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -285,9 +285,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
geo->ecc_strength = get_ecc_strength(this);
if (!gpmi_check_ecc(this)) {
dev_err(this->dev,
- "We can not support this nand chip."
- " Its required ecc strength(%d) is beyond our"
- " capability(%d).\n", geo->ecc_strength,
+ "required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n",
+ geo->ecc_strength,
this->devdata->bch_max_ecc_strength);
return -EINVAL;
}
@@ -1082,6 +1081,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
int first, last, marker_pos;
int ecc_parity_size;
int col = 0;
+ int old_swap_block_mark = this->swap_block_mark;
/* The size of ECC parity */
ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
@@ -1090,17 +1090,21 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
first = offs / size;
last = (offs + len - 1) / size;
- /*
- * Find the chunk which contains the Block Marker. If this chunk is
- * in the range of [first, last], we have to read out the whole page.
- * Why? since we had swapped the data at the position of Block Marker
- * to the metadata which is bound with the chunk 0.
- */
- marker_pos = geo->block_mark_byte_offset / size;
- if (last >= marker_pos && first <= marker_pos) {
- dev_dbg(this->dev, "page:%d, first:%d, last:%d, marker at:%d\n",
+ if (this->swap_block_mark) {
+ /*
+ * Find the chunk which contains the Block Marker.
+ * If this chunk is in the range of [first, last],
+ * we have to read out the whole page.
+ * Why? since we had swapped the data at the position of Block
+ * Marker to the metadata which is bound with the chunk 0.
+ */
+ marker_pos = geo->block_mark_byte_offset / size;
+ if (last >= marker_pos && first <= marker_pos) {
+ dev_dbg(this->dev,
+ "page:%d, first:%d, last:%d, marker at:%d\n",
page, first, last, marker_pos);
- return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+ }
}
meta = geo->metadata_size;
@@ -1146,7 +1150,7 @@ static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
this->bch_geometry = old_geo;
- this->swap_block_mark = true;
+ this->swap_block_mark = old_swap_block_mark;
return max_bitflips;
}
@@ -1180,7 +1184,7 @@ static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
/* Handle block mark swapping. */
block_mark_swapping(this,
- (void *) payload_virt, (void *) auxiliary_virt);
+ (void *)payload_virt, (void *)auxiliary_virt);
} else {
/*
* If control arrives here, we're not doing block mark swapping,
@@ -1310,10 +1314,10 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
/*
* Now, we want to make sure the block mark is correct. In the
- * Swapping/Raw case, we already have it. Otherwise, we need to
- * explicitly read it.
+ * non-transcribing case (!GPMI_IS_MX23()), we already have it.
+ * Otherwise, we need to explicitly read it.
*/
- if (!this->swap_block_mark) {
+ if (GPMI_IS_MX23(this)) {
/* Read the block mark into the first byte of the OOB buffer. */
chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
chip->oob_poi[0] = chip->read_byte(mtd);
@@ -1354,7 +1358,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
chipnr = (int)(ofs >> chip->chip_shift);
chip->select_chip(mtd, chipnr);
- column = this->swap_block_mark ? mtd->writesize : 0;
+ column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
/* Write the block mark. */
block_mark = this->data_buffer_dma;
@@ -1597,8 +1601,9 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
dev_dbg(dev, "Transcribing mark in block %u\n", block);
ret = chip->block_markbad(mtd, byte);
if (ret)
- dev_err(dev, "Failed to mark block bad with "
- "ret %d\n", ret);
+ dev_err(dev,
+ "Failed to mark block bad with ret %d\n",
+ ret);
}
}
@@ -1649,9 +1654,6 @@ static int gpmi_init_last(struct gpmi_nand_data *this)
struct bch_geometry *bch_geo = &this->bch_geometry;
int ret;
- /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
- this->swap_block_mark = !GPMI_IS_MX23(this);
-
/* Set up the medium geometry */
ret = gpmi_set_geometry(this);
if (ret)
@@ -1715,9 +1717,20 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
chip->badblock_pattern = &gpmi_bbt_descr;
chip->block_markbad = gpmi_block_markbad;
chip->options |= NAND_NO_SUBPAGE_WRITE;
- if (of_get_nand_on_flash_bbt(this->dev->of_node))
+
+ /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
+ this->swap_block_mark = !GPMI_IS_MX23(this);
+
+ if (of_get_nand_on_flash_bbt(this->dev->of_node)) {
chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+ if (of_property_read_bool(this->dev->of_node,
+ "fsl,no-blockmark-swap"))
+ this->swap_block_mark = false;
+ }
+ dev_dbg(this->dev, "Blockmark swapping %sabled\n",
+ this->swap_block_mark ? "en" : "dis");
+
/*
* Allocate a temporary DMA buffer for reading ID in the
* nand_scan_ident().
@@ -1760,16 +1773,16 @@ err_out:
static const struct of_device_id gpmi_nand_id_table[] = {
{
.compatible = "fsl,imx23-gpmi-nand",
- .data = (void *)&gpmi_devdata_imx23,
+ .data = &gpmi_devdata_imx23,
}, {
.compatible = "fsl,imx28-gpmi-nand",
- .data = (void *)&gpmi_devdata_imx28,
+ .data = &gpmi_devdata_imx28,
}, {
.compatible = "fsl,imx6q-gpmi-nand",
- .data = (void *)&gpmi_devdata_imx6q,
+ .data = &gpmi_devdata_imx6q,
}, {
.compatible = "fsl,imx6sx-gpmi-nand",
- .data = (void *)&gpmi_devdata_imx6sx,
+ .data = &gpmi_devdata_imx6sx,
}, {}
};
MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 687478c9f09c..7335346dc126 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -721,12 +721,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
nand_chip->bbt_td = &lpc32xx_nand_bbt;
nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
- /* bitflip_threshold's default is defined as ecc_strength anyway.
- * Unfortunately, it is set only later at add_mtd_device(). Meanwhile
- * being 0, it causes bad block table scanning errors in
- * nand_scan_tail(), so preparing it here. */
- mtd->bitflip_threshold = nand_chip->ecc.strength;
-
if (use_dma) {
res = lpc32xx_dma_setup(host);
if (res) {
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
index 53a6742e3da3..8caef28e0756 100644
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ b/drivers/mtd/nand/lpc32xx_slc.c
@@ -840,12 +840,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
chip->ecc.strength = 1;
chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
- /* bitflip_threshold's default is defined as ecc_strength anyway.
- * Unfortunately, it is set only later at add_mtd_device(). Meanwhile
- * being 0, it causes bad block table scanning errors in
- * nand_scan_tail(), so preparing it here already. */
- mtd->bitflip_threshold = chip->ecc.strength;
-
/*
* Allocate a large enough buffer for a single huge page plus
* extra space for the spare area and ECC storage area
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 4f3e80c68a26..d8cdf06343fb 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -488,6 +488,23 @@ static int nand_check_wp(struct mtd_info *mtd)
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
* @mtd: MTD device structure
* @ofs: offset from device start
+ *
+ * Check if the block is marked as reserved.
+ */
+static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (!chip->bbt)
+ return 0;
+ /* Return info from the table */
+ return nand_isreserved_bbt(mtd, ofs);
+}
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
* @getchip: 0, if the chip is already selected
* @allowbbt: 1, if its allowed to access the bbt area
*
@@ -4113,6 +4130,7 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd->_unlock = NULL;
mtd->_suspend = nand_suspend;
mtd->_resume = nand_resume;
+ mtd->_block_isreserved = nand_block_isreserved;
mtd->_block_isbad = nand_block_isbad;
mtd->_block_markbad = nand_block_markbad;
mtd->writebufsize = mtd->writesize;
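
A hypothetical caller for the new hook, assuming the matching mtd_block_isreserved() accessor in the MTD core dispatches to mtd->_block_isreserved when it is set (as wired up in nand_scan_tail() above); count_reserved_blocks() itself is illustrative only.

#include <linux/mtd/mtd.h>

static void count_reserved_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int reserved = 0;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize)
		if (mtd_block_isreserved(mtd, ofs))
			reserved++;	/* e.g. blocks holding the bad block table */

	pr_info("%s: %d reserved block(s)\n", mtd->name, reserved);
}
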
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 7f0c3b4c2a4f..443fa82cde6a 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1311,6 +1311,20 @@ int nand_default_bbt(struct mtd_info *mtd)
}
/**
+ * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ */
+int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *this = mtd->priv;
+ int block;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
+}
+
+/**
* nand_isbad_bbt - [NAND Interface] Check if a block is bad
* @mtd: MTD device structure
* @offs: offset in the device
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
new file mode 100644
index 000000000000..8b36253420fa
--- /dev/null
+++ b/drivers/mtd/nand/nand_timings.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2014 Free Electrons
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/mtd/nand.h>
+
+static const struct nand_sdr_timings onfi_sdr_timings[] = {
+ /* Mode 0 */
+ {
+ .tADL_min = 200000,
+ .tALH_min = 20000,
+ .tALS_min = 50000,
+ .tAR_min = 25000,
+ .tCEA_max = 100000,
+ .tCEH_min = 20000,
+ .tCH_min = 20000,
+ .tCHZ_max = 100000,
+ .tCLH_min = 20000,
+ .tCLR_min = 20000,
+ .tCLS_min = 50000,
+ .tCOH_min = 0,
+ .tCS_min = 70000,
+ .tDH_min = 20000,
+ .tDS_min = 40000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 10000,
+ .tITC_max = 1000000,
+ .tRC_min = 100000,
+ .tREA_max = 40000,
+ .tREH_min = 30000,
+ .tRHOH_min = 0,
+ .tRHW_min = 200000,
+ .tRHZ_max = 200000,
+ .tRLOH_min = 0,
+ .tRP_min = 50000,
+ .tRST_max = 250000000000,
+ .tWB_max = 200000,
+ .tRR_min = 40000,
+ .tWC_min = 100000,
+ .tWH_min = 30000,
+ .tWHR_min = 120000,
+ .tWP_min = 50000,
+ .tWW_min = 100000,
+ },
+ /* Mode 1 */
+ {
+ .tADL_min = 100000,
+ .tALH_min = 10000,
+ .tALS_min = 25000,
+ .tAR_min = 10000,
+ .tCEA_max = 45000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 25000,
+ .tCOH_min = 15000,
+ .tCS_min = 35000,
+ .tDH_min = 10000,
+ .tDS_min = 20000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 50000,
+ .tREA_max = 30000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 25000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 45000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 25000,
+ .tWW_min = 100000,
+ },
+ /* Mode 2 */
+ {
+ .tADL_min = 100000,
+ .tALH_min = 10000,
+ .tALS_min = 15000,
+ .tAR_min = 10000,
+ .tCEA_max = 30000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 15000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 15000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 35000,
+ .tREA_max = 25000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tRP_min = 17000,
+ .tWC_min = 35000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 17000,
+ .tWW_min = 100000,
+ },
+ /* Mode 3 */
+ {
+ .tADL_min = 100000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 30000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 15000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 30000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 15000,
+ .tWW_min = 100000,
+ },
+ /* Mode 4 */
+ {
+ .tADL_min = 70000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 20000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 25000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 12000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 25000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 12000,
+ .tWW_min = 100000,
+ },
+ /* Mode 5 */
+ {
+ .tADL_min = 70000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 5000,
+ .tDS_min = 7000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 20000,
+ .tREA_max = 16000,
+ .tREH_min = 7000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 10000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 20000,
+ .tWH_min = 7000,
+ .tWHR_min = 80000,
+ .tWP_min = 10000,
+ .tWW_min = 100000,
+ },
+};
+
+/**
+ * onfi_async_timing_mode_to_sdr_timings - [NAND Interface] Retrieve NAND
+ * timings according to the given ONFI timing mode
+ * @mode: ONFI timing mode
+ */
+const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
+{
+ if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
+ return ERR_PTR(-EINVAL);
+
+ return &onfi_sdr_timings[mode];
+}
+EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
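
A sketch of how a controller driver might consume the new helper; apply_best_sdr_timings() is hypothetical, while onfi_get_async_timing_mode() is the existing accessor for the ONFI timing-mode bitmask filled in by nand_scan_ident().

static int apply_best_sdr_timings(struct nand_chip *chip)
{
	const struct nand_sdr_timings *t;
	int mode;

	/* highest asynchronous timing mode advertised by the chip (0..5) */
	mode = fls(onfi_get_async_timing_mode(chip)) - 1;

	t = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(t))
		return PTR_ERR(t);

	/* program the controller from t->tWP_min, t->tREA_max, ... (picoseconds) */
	return 0;
}
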
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 79acbb8691b5..6b97bf17ce5d 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -208,10 +208,10 @@ static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
if (info->clk_state == CLOCK_ENABLE) {
if (new_state != CLOCK_ENABLE)
- clk_disable(info->clk);
+ clk_disable_unprepare(info->clk);
} else {
if (new_state == CLOCK_ENABLE)
- clk_enable(info->clk);
+ clk_prepare_enable(info->clk);
}
info->clk_state = new_state;
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index ab2607273e80..dcae2f6a2b11 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -32,10 +32,10 @@ config MTD_ONENAND_OMAP2
config MTD_ONENAND_SAMSUNG
tristate "OneNAND on Samsung SOC controller support"
- depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS4
help
Support for a OneNAND flash device connected to an Samsung SOC.
- S3C64XX/S5PC100 use command mapping method.
+ S3C64XX uses command mapping method.
S5PC110/S5PC210 use generic OneNAND method.
config MTD_ONENAND_OTP
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index efb819c3df2f..19cfb97adbc0 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -10,7 +10,7 @@
* published by the Free Software Foundation.
*
* Implementation:
- * S3C64XX and S5PC100: emulate the pseudo BufferRAM
+ * S3C64XX: emulate the pseudo BufferRAM
* S5PC110: use DMA
*/
@@ -32,7 +32,6 @@
enum soc_type {
TYPE_S3C6400,
TYPE_S3C6410,
- TYPE_S5PC100,
TYPE_S5PC110,
};
@@ -59,7 +58,6 @@ enum soc_type {
#define MAP_11 (0x3)
#define S3C64XX_CMD_MAP_SHIFT 24
-#define S5PC100_CMD_MAP_SHIFT 26
#define S3C6400_FBA_SHIFT 10
#define S3C6400_FPA_SHIFT 4
@@ -69,10 +67,6 @@ enum soc_type {
#define S3C6410_FPA_SHIFT 6
#define S3C6410_FSA_SHIFT 4
-#define S5PC100_FBA_SHIFT 13
-#define S5PC100_FPA_SHIFT 7
-#define S5PC100_FSA_SHIFT 5
-
/* S5PC110 specific definitions */
#define S5PC110_DMA_SRC_ADDR 0x400
#define S5PC110_DMA_SRC_CFG 0x404
@@ -195,11 +189,6 @@ static unsigned int s3c64xx_cmd_map(unsigned type, unsigned val)
return (type << S3C64XX_CMD_MAP_SHIFT) | val;
}
-static unsigned int s5pc1xx_cmd_map(unsigned type, unsigned val)
-{
- return (type << S5PC100_CMD_MAP_SHIFT) | val;
-}
-
static unsigned int s3c6400_mem_addr(int fba, int fpa, int fsa)
{
return (fba << S3C6400_FBA_SHIFT) | (fpa << S3C6400_FPA_SHIFT) |
@@ -212,12 +201,6 @@ static unsigned int s3c6410_mem_addr(int fba, int fpa, int fsa)
(fsa << S3C6410_FSA_SHIFT);
}
-static unsigned int s5pc100_mem_addr(int fba, int fpa, int fsa)
-{
- return (fba << S5PC100_FBA_SHIFT) | (fpa << S5PC100_FPA_SHIFT) |
- (fsa << S5PC100_FSA_SHIFT);
-}
-
static void s3c_onenand_reset(void)
{
unsigned long timeout = 0x10000;
@@ -835,9 +818,6 @@ static void s3c_onenand_setup(struct mtd_info *mtd)
} else if (onenand->type == TYPE_S3C6410) {
onenand->mem_addr = s3c6410_mem_addr;
onenand->cmd_map = s3c64xx_cmd_map;
- } else if (onenand->type == TYPE_S5PC100) {
- onenand->mem_addr = s5pc100_mem_addr;
- onenand->cmd_map = s5pc1xx_cmd_map;
} else if (onenand->type == TYPE_S5PC110) {
/* Use generic onenand functions */
this->read_bufferram = s5pc110_read_bufferram;
@@ -1111,9 +1091,6 @@ static struct platform_device_id s3c_onenand_driver_ids[] = {
.name = "s3c6410-onenand",
.driver_data = TYPE_S3C6410,
}, {
- .name = "s5pc100-onenand",
- .driver_data = TYPE_S5PC100,
- }, {
.name = "s5pc110-onenand",
.driver_data = TYPE_S5PC110,
}, { },
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index c713c8656710..b5ad6bebf5e7 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -48,6 +48,25 @@ static int read_sr(struct spi_nor *nor)
}
/*
+ * Read the flag status register, returning its value in the location.
+ * Return the flag status register value.
+ * Returns negative if error occurred.
+ */
+static int read_fsr(struct spi_nor *nor)
+{
+ int ret;
+ u8 val;
+
+ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
+ if (ret < 0) {
+ pr_err("error %d reading FSR\n", ret);
+ return ret;
+ }
+
+ return val;
+}
+
+/*
* Read configuration register, returning its value in the
* location. Return the configuration register value.
* Returns negative if error occured.
@@ -165,6 +184,32 @@ static int spi_nor_wait_till_ready(struct spi_nor *nor)
return -ETIMEDOUT;
}
+static int spi_nor_wait_till_fsr_ready(struct spi_nor *nor)
+{
+ unsigned long deadline;
+ int sr;
+ int fsr;
+
+ deadline = jiffies + MAX_READY_WAIT_JIFFIES;
+
+ do {
+ cond_resched();
+
+ sr = read_sr(nor);
+ if (sr < 0) {
+ break;
+ } else if (!(sr & SR_WIP)) {
+ fsr = read_fsr(nor);
+ if (fsr < 0)
+ break;
+ if (fsr & FSR_READY)
+ return 0;
+ }
+ } while (!time_after_eq(jiffies, deadline));
+
+ return -ETIMEDOUT;
+}
+
/*
* Service routine to read status register until ready, or timeout occurs.
* Returns non-zero if error.
@@ -402,6 +447,7 @@ struct flash_info {
#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */
+#define USE_FSR 0x80 /* use flag status register */
};
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
@@ -449,6 +495,7 @@ const struct spi_device_id spi_nor_ids[] = {
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
{ "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
+ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) },
{ "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
/* ESMT */
@@ -488,6 +535,8 @@ const struct spi_device_id spi_nor_ids[] = {
{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
{ "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
{ "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
+ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, USE_FSR) },
+ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, USE_FSR) },
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
@@ -965,6 +1014,10 @@ int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
else
mtd->_write = spi_nor_write;
+ if ((info->flags & USE_FSR) &&
+ nor->wait_till_ready == spi_nor_wait_till_ready)
+ nor->wait_till_ready = spi_nor_wait_till_fsr_ready;
+
/* prefer "small sector" erase if possible */
if (info->flags & SECT_4K) {
nor->erase_opcode = SPINOR_OP_BE_4K;
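
A minimal sketch of the readiness condition the FSR support adds, using only read_sr(), read_fsr(), SR_WIP and FSR_READY from this file; flash_is_ready() is not part of the patch.

static bool flash_is_ready(struct spi_nor *nor)
{
	int sr = read_sr(nor);
	int fsr = read_fsr(nor);

	/* ready only when both reads succeed, WIP is clear and FSR reports ready */
	return sr >= 0 && fsr >= 0 && !(sr & SR_WIP) && (fsr & FSR_READY);
}
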
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 8457df7ec5af..33c64955d4d7 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -378,9 +378,11 @@ int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
struct gendisk *gd;
- int disk_capacity;
+ u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9;
int ret;
+ if ((sector_t)disk_capacity != disk_capacity)
+ return -EFBIG;
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
@@ -412,7 +414,6 @@ int ubiblock_create(struct ubi_volume_info *vi)
gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
gd->private_data = dev;
sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
- disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
set_capacity(gd, disk_capacity);
dev->gd = gd;
@@ -498,11 +499,16 @@ int ubiblock_remove(struct ubi_volume_info *vi)
return 0;
}
-static void ubiblock_resize(struct ubi_volume_info *vi)
+static int ubiblock_resize(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
- int disk_capacity;
+ u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9;
+ if ((sector_t)disk_capacity != disk_capacity) {
+ ubi_warn("%s: the volume is too big, cannot resize (%d LEBs)",
+ dev->gd->disk_name, vi->size);
+ return -EFBIG;
+ }
/*
* Need to lock the device list until we stop using the device,
* otherwise the device struct might get released in
@@ -512,15 +518,15 @@ static void ubiblock_resize(struct ubi_volume_info *vi)
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
mutex_unlock(&devices_mutex);
- return;
+ return -ENODEV;
}
mutex_lock(&dev->dev_mutex);
- disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
set_capacity(dev->gd, disk_capacity);
ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size);
mutex_unlock(&dev->dev_mutex);
mutex_unlock(&devices_mutex);
+ return 0;
}
static int ubiblock_notify(struct notifier_block *nb,
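
A sketch of the overflow guard used in both ubiblock hunks above: on 32-bit builds without CONFIG_LBDAF, sector_t is narrower than u64, so a capacity that does not survive the cast round-trip cannot be passed to set_capacity(). The helper name is hypothetical.

#include <linux/types.h>

static inline bool capacity_fits_sector_t(u64 capacity)
{
	/* true only if the value is representable as a sector_t */
	return (u64)(sector_t)capacity == capacity;
}
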
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index d77b1c1d7c72..07cac5f9ffb8 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -591,7 +591,7 @@ static int init_volumes(struct ubi_device *ubi,
/* Static volumes only */
av = ubi_find_av(ai, i);
- if (!av) {
+ if (!av || !av->leb_count) {
/*
* No eraseblocks belonging to this volume found. We
* don't actually know whether this static volume is
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 0f3425dac910..20f491713145 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1718,12 +1718,12 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
vol_id, lnum, ubi->works_count);
while (found) {
- struct ubi_work *wrk;
+ struct ubi_work *wrk, *tmp;
found = 0;
down_read(&ubi->work_sem);
spin_lock(&ubi->wl_lock);
- list_for_each_entry(wrk, &ubi->works, list) {
+ list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
(lnum == UBI_ALL || wrk->lnum == lnum)) {
list_del(&wrk->list);
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index a78e4c136959..31c48a7ac2b6 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -89,6 +89,124 @@ MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+/* AU1000 MAC registers and bits */
+#define MAC_CONTROL 0x0
+# define MAC_RX_ENABLE (1 << 2)
+# define MAC_TX_ENABLE (1 << 3)
+# define MAC_DEF_CHECK (1 << 5)
+# define MAC_SET_BL(X) (((X) & 0x3) << 6)
+# define MAC_AUTO_PAD (1 << 8)
+# define MAC_DISABLE_RETRY (1 << 10)
+# define MAC_DISABLE_BCAST (1 << 11)
+# define MAC_LATE_COL (1 << 12)
+# define MAC_HASH_MODE (1 << 13)
+# define MAC_HASH_ONLY (1 << 15)
+# define MAC_PASS_ALL (1 << 16)
+# define MAC_INVERSE_FILTER (1 << 17)
+# define MAC_PROMISCUOUS (1 << 18)
+# define MAC_PASS_ALL_MULTI (1 << 19)
+# define MAC_FULL_DUPLEX (1 << 20)
+# define MAC_NORMAL_MODE 0
+# define MAC_INT_LOOPBACK (1 << 21)
+# define MAC_EXT_LOOPBACK (1 << 22)
+# define MAC_DISABLE_RX_OWN (1 << 23)
+# define MAC_BIG_ENDIAN (1 << 30)
+# define MAC_RX_ALL (1 << 31)
+#define MAC_ADDRESS_HIGH 0x4
+#define MAC_ADDRESS_LOW 0x8
+#define MAC_MCAST_HIGH 0xC
+#define MAC_MCAST_LOW 0x10
+#define MAC_MII_CNTRL 0x14
+# define MAC_MII_BUSY (1 << 0)
+# define MAC_MII_READ 0
+# define MAC_MII_WRITE (1 << 1)
+# define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6)
+# define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11)
+#define MAC_MII_DATA 0x18
+#define MAC_FLOW_CNTRL 0x1C
+# define MAC_FLOW_CNTRL_BUSY (1 << 0)
+# define MAC_FLOW_CNTRL_ENABLE (1 << 1)
+# define MAC_PASS_CONTROL (1 << 2)
+# define MAC_SET_PAUSE(X) (((X) & 0xffff) << 16)
+#define MAC_VLAN1_TAG 0x20
+#define MAC_VLAN2_TAG 0x24
+
+/* Ethernet Controller Enable */
+# define MAC_EN_CLOCK_ENABLE (1 << 0)
+# define MAC_EN_RESET0 (1 << 1)
+# define MAC_EN_TOSS (0 << 2)
+# define MAC_EN_CACHEABLE (1 << 3)
+# define MAC_EN_RESET1 (1 << 4)
+# define MAC_EN_RESET2 (1 << 5)
+# define MAC_DMA_RESET (1 << 6)
+
+/* Ethernet Controller DMA Channels */
+/* offsets from MAC_TX_RING_ADDR address */
+#define MAC_TX_BUFF0_STATUS 0x0
+# define TX_FRAME_ABORTED (1 << 0)
+# define TX_JAB_TIMEOUT (1 << 1)
+# define TX_NO_CARRIER (1 << 2)
+# define TX_LOSS_CARRIER (1 << 3)
+# define TX_EXC_DEF (1 << 4)
+# define TX_LATE_COLL_ABORT (1 << 5)
+# define TX_EXC_COLL (1 << 6)
+# define TX_UNDERRUN (1 << 7)
+# define TX_DEFERRED (1 << 8)
+# define TX_LATE_COLL (1 << 9)
+# define TX_COLL_CNT_MASK (0xF << 10)
+# define TX_PKT_RETRY (1 << 31)
+#define MAC_TX_BUFF0_ADDR 0x4
+# define TX_DMA_ENABLE (1 << 0)
+# define TX_T_DONE (1 << 1)
+# define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
+#define MAC_TX_BUFF0_LEN 0x8
+#define MAC_TX_BUFF1_STATUS 0x10
+#define MAC_TX_BUFF1_ADDR 0x14
+#define MAC_TX_BUFF1_LEN 0x18
+#define MAC_TX_BUFF2_STATUS 0x20
+#define MAC_TX_BUFF2_ADDR 0x24
+#define MAC_TX_BUFF2_LEN 0x28
+#define MAC_TX_BUFF3_STATUS 0x30
+#define MAC_TX_BUFF3_ADDR 0x34
+#define MAC_TX_BUFF3_LEN 0x38
+
+/* offsets from MAC_RX_RING_ADDR */
+#define MAC_RX_BUFF0_STATUS 0x0
+# define RX_FRAME_LEN_MASK 0x3fff
+# define RX_WDOG_TIMER (1 << 14)
+# define RX_RUNT (1 << 15)
+# define RX_OVERLEN (1 << 16)
+# define RX_COLL (1 << 17)
+# define RX_ETHER (1 << 18)
+# define RX_MII_ERROR (1 << 19)
+# define RX_DRIBBLING (1 << 20)
+# define RX_CRC_ERROR (1 << 21)
+# define RX_VLAN1 (1 << 22)
+# define RX_VLAN2 (1 << 23)
+# define RX_LEN_ERROR (1 << 24)
+# define RX_CNTRL_FRAME (1 << 25)
+# define RX_U_CNTRL_FRAME (1 << 26)
+# define RX_MCAST_FRAME (1 << 27)
+# define RX_BCAST_FRAME (1 << 28)
+# define RX_FILTER_FAIL (1 << 29)
+# define RX_PACKET_FILTER (1 << 30)
+# define RX_MISSED_FRAME (1 << 31)
+
+# define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN | \
+ RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
+ RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
+#define MAC_RX_BUFF0_ADDR 0x4
+# define RX_DMA_ENABLE (1 << 0)
+# define RX_T_DONE (1 << 1)
+# define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
+# define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0)
+#define MAC_RX_BUFF1_STATUS 0x10
+#define MAC_RX_BUFF1_ADDR 0x14
+#define MAC_RX_BUFF2_STATUS 0x20
+#define MAC_RX_BUFF2_ADDR 0x24
+#define MAC_RX_BUFF3_STATUS 0x30
+#define MAC_RX_BUFF3_ADDR 0x34
+
/*
* Theory of operation
*
@@ -152,10 +270,12 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
if (force_reset || (!aup->mac_enabled)) {
writel(MAC_EN_CLOCK_ENABLE, aup->enable);
- au_sync_delay(2);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
| MAC_EN_CLOCK_ENABLE), aup->enable);
- au_sync_delay(2);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
aup->mac_enabled = 1;
}
@@ -273,7 +393,8 @@ static void au1000_hard_stop(struct net_device *dev)
reg = readl(&aup->mac->control);
reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
writel(reg, &aup->mac->control);
- au_sync_delay(10);
+ wmb(); /* drain writebuffer */
+ mdelay(10);
}
static void au1000_enable_rx_tx(struct net_device *dev)
@@ -286,7 +407,8 @@ static void au1000_enable_rx_tx(struct net_device *dev)
reg = readl(&aup->mac->control);
reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
writel(reg, &aup->mac->control);
- au_sync_delay(10);
+ wmb(); /* drain writebuffer */
+ mdelay(10);
}
static void
@@ -336,7 +458,8 @@ au1000_adjust_link(struct net_device *dev)
reg |= MAC_DISABLE_RX_OWN;
}
writel(reg, &aup->mac->control);
- au_sync_delay(1);
+ wmb(); /* drain writebuffer */
+ mdelay(1);
au1000_enable_rx_tx(dev);
aup->old_duplex = phydev->duplex;
@@ -500,9 +623,11 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
au1000_hard_stop(dev);
writel(MAC_EN_CLOCK_ENABLE, aup->enable);
- au_sync_delay(2);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
writel(0, aup->enable);
- au_sync_delay(2);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
aup->tx_full = 0;
for (i = 0; i < NUM_RX_DMA; i++) {
@@ -652,7 +777,7 @@ static int au1000_init(struct net_device *dev)
for (i = 0; i < NUM_RX_DMA; i++)
aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
- au_sync();
+ wmb(); /* drain writebuffer */
control = MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
@@ -669,7 +794,7 @@ static int au1000_init(struct net_device *dev)
writel(control, &aup->mac->control);
writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
- au_sync();
+ wmb(); /* drain writebuffer */
spin_unlock_irqrestore(&aup->lock, flags);
return 0;
@@ -760,7 +885,7 @@ static int au1000_rx(struct net_device *dev)
}
prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
- au_sync();
+ wmb(); /* drain writebuffer */
/* next descriptor */
prxd = aup->rx_dma_ring[aup->rx_head];
@@ -808,7 +933,7 @@ static void au1000_tx_ack(struct net_device *dev)
au1000_update_tx_stats(dev, ptxd->status);
ptxd->buff_stat &= ~TX_T_DONE;
ptxd->len = 0;
- au_sync();
+ wmb(); /* drain writebuffer */
aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
ptxd = aup->tx_dma_ring[aup->tx_tail];
@@ -939,7 +1064,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
ps->tx_bytes += ptxd->len;
ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
- au_sync();
+ wmb(); /* drain writebuffer */
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index e7cc9174e364..4a8fdc4721d5 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -481,37 +481,32 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
dma_addr_t *new_dma_addr_list;
struct pcnet32_tx_head *new_tx_ring;
struct sk_buff **new_skb_list;
+ unsigned int entries = BIT(size);
pcnet32_purge_tx_ring(dev);
- new_tx_ring = pci_alloc_consistent(lp->pci_dev,
- sizeof(struct pcnet32_tx_head) *
- (1 << size),
- &new_ring_dma_addr);
- if (new_tx_ring == NULL) {
- netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ new_tx_ring =
+ pci_zalloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) * entries,
+ &new_ring_dma_addr);
+ if (new_tx_ring == NULL)
return;
- }
- memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
- new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t),
- GFP_ATOMIC);
+ new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
if (!new_dma_addr_list)
goto free_new_tx_ring;
- new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
if (!new_skb_list)
goto free_new_lists;
kfree(lp->tx_skbuff);
kfree(lp->tx_dma_addr);
pci_free_consistent(lp->pci_dev,
- sizeof(struct pcnet32_tx_head) *
- lp->tx_ring_size, lp->tx_ring,
- lp->tx_ring_dma_addr);
+ sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+ lp->tx_ring, lp->tx_ring_dma_addr);
- lp->tx_ring_size = (1 << size);
+ lp->tx_ring_size = entries;
lp->tx_mod_mask = lp->tx_ring_size - 1;
lp->tx_len_bits = (size << 12);
lp->tx_ring = new_tx_ring;
@@ -524,8 +519,7 @@ free_new_lists:
kfree(new_dma_addr_list);
free_new_tx_ring:
pci_free_consistent(lp->pci_dev,
- sizeof(struct pcnet32_tx_head) *
- (1 << size),
+ sizeof(struct pcnet32_tx_head) * entries,
new_tx_ring,
new_ring_dma_addr);
}
@@ -549,17 +543,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
struct pcnet32_rx_head *new_rx_ring;
struct sk_buff **new_skb_list;
int new, overlap;
- unsigned int entries = 1 << size;
+ unsigned int entries = BIT(size);
- new_rx_ring = pci_alloc_consistent(lp->pci_dev,
- sizeof(struct pcnet32_rx_head) *
- entries,
- &new_ring_dma_addr);
- if (new_rx_ring == NULL) {
- netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ new_rx_ring =
+ pci_zalloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) * entries,
+ &new_ring_dma_addr);
+ if (new_rx_ring == NULL)
return;
- }
- memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
if (!new_dma_addr_list)
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 4345332533ad..316e0c3fe048 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -831,17 +831,14 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
/* real ring DMA buffer */
size = adapter->ring_size;
- adapter->ring_vir_addr = pci_alloc_consistent(pdev,
- adapter->ring_size, &adapter->ring_dma);
-
+ adapter->ring_vir_addr = pci_zalloc_consistent(pdev, adapter->ring_size,
+ &adapter->ring_dma);
if (adapter->ring_vir_addr == NULL) {
netdev_err(adapter->netdev,
"pci_alloc_consistent failed, size = D%d\n", size);
return -ENOMEM;
}
- memset(adapter->ring_vir_addr, 0, adapter->ring_size);
-
rx_page_desc = rx_ring->rx_page_desc;
/* Init TPD Ring */
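
The allocation changes in this and several of the following network and wireless drivers all follow the same pattern; a generic sketch (the wrapper name is illustrative):

static void *alloc_dma_ring(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	/*
	 * pci_zalloc_consistent() returns zeroed memory, so the old
	 * pci_alloc_consistent() + memset(buf, 0, size) pair collapses
	 * into a single call.
	 */
	return pci_zalloc_consistent(pdev, size, dma);
}
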
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4247356c16ff..1a162d21d8ac 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6527,11 +6527,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* We control everything through one PF */
func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
- if ((pdev->device == 0xa000 && func != 0) ||
- func != ent->driver_data) {
+ if (func != ent->driver_data) {
pci_save_state(pdev); /* to restore SR-IOV later */
- err = 0;
- goto out_unmap_bar0;
+ goto sriov;
}
adapter->pdev = pdev;
@@ -6697,6 +6695,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_offload(adapter))
attach_ulds(adapter);
+sriov:
#ifdef CONFIG_PCI_IOV
if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
if (pci_enable_sriov(pdev, num_vf[func]) == 0)
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 5abc496bcf29..37472ce4fac3 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -432,14 +432,12 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int err = 0;
if (!vdev->fw_info) {
- vdev->fw_info = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa);
+ vdev->fw_info = pci_zalloc_consistent(vdev->pdev,
+ sizeof(struct vnic_devcmd_fw_info),
+ &vdev->fw_info_pa);
if (!vdev->fw_info)
return -ENOMEM;
- memset(vdev->fw_info, 0, sizeof(struct vnic_devcmd_fw_info));
-
a0 = vdev->fw_info_pa;
a1 = sizeof(struct vnic_devcmd_fw_info);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index fc5413488496..1eedfba2ad3c 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -41,7 +41,6 @@
#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
-#include <asm/mpc8xx.h>
#include <asm/cpm1.h>
#endif
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index b4bf02f57d43..90b3b19b7cd3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -40,7 +40,6 @@
#ifdef CONFIG_8xx
#include <asm/8xx_immap.h>
#include <asm/pgtable.h>
-#include <asm/mpc8xx.h>
#include <asm/cpm1.h>
#endif
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 681a9e81ff51..e8ba7470700a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1948,7 +1948,7 @@ static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
/* add filter to the list */
if (parent)
- hlist_add_after(&parent->fdir_node, &input->fdir_node);
+ hlist_add_behind(&input->fdir_node, &parent->fdir_node);
else
hlist_add_head(&input->fdir_node,
&pf->fdir_filter_list);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 94a1c07efeb0..e4100b5737b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2517,7 +2517,7 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
/* add filter to the list */
if (parent)
- hlist_add_after(&parent->fdir_node, &input->fdir_node);
+ hlist_add_behind(&input->fdir_node, &parent->fdir_node);
else
hlist_add_head(&input->fdir_node,
&adapter->fdir_filter_list);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 69693384b58c..59915144aabb 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1622,11 +1622,10 @@ static int sky2_alloc_buffers(struct sky2_port *sky2)
if (!sky2->tx_ring)
goto nomem;
- sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
- &sky2->rx_le_map);
+ sky2->rx_le = pci_zalloc_consistent(hw->pdev, RX_LE_BYTES,
+ &sky2->rx_le_map);
if (!sky2->rx_le)
goto nomem;
- memset(sky2->rx_le, 0, RX_LE_BYTES);
sky2->rx_ring = kcalloc(sky2->rx_pending, sizeof(struct rx_ring_info),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 064a48d0c368..cd5f106306d9 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4409,14 +4409,13 @@ static int ksz_alloc_desc(struct dev_info *adapter)
DESC_ALIGNMENT;
adapter->desc_pool.alloc_virt =
- pci_alloc_consistent(
- adapter->pdev, adapter->desc_pool.alloc_size,
- &adapter->desc_pool.dma_addr);
+ pci_zalloc_consistent(adapter->pdev,
+ adapter->desc_pool.alloc_size,
+ &adapter->desc_pool.dma_addr);
if (adapter->desc_pool.alloc_virt == NULL) {
adapter->desc_pool.alloc_size = 0;
return 1;
}
- memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);
/* Align to the next cache line boundary. */
offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 6f6be57f4690..b8d5270359cd 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -129,14 +129,12 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
return NX_RCODE_INVALID_ARGS;
}
- addr = pci_alloc_consistent(adapter->pdev, size, &md_template_addr);
-
+ addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
if (!addr) {
dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
return -ENOMEM;
}
- memset(addr, 0, size);
memset(&cmd, 0, sizeof(cmd));
memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b40050e03a56..d836ace52277 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2727,23 +2727,22 @@ static void ql_free_shadow_space(struct ql_adapter *qdev)
static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
qdev->rx_ring_shadow_reg_area =
- pci_alloc_consistent(qdev->pdev,
- PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
+ pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
+ &qdev->rx_ring_shadow_reg_dma);
if (qdev->rx_ring_shadow_reg_area == NULL) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of RX shadow space failed.\n");
return -ENOMEM;
}
- memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
+
qdev->tx_ring_shadow_reg_area =
- pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
- &qdev->tx_ring_shadow_reg_dma);
+ pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
+ &qdev->tx_ring_shadow_reg_dma);
if (qdev->tx_ring_shadow_reg_area == NULL) {
netif_err(qdev, ifup, qdev->ndev,
"Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
}
- memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
return 0;
err_wqp_sh_area:
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 02a3ee282eee..d5e07def6a59 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -585,7 +585,7 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*16) /* 16MB */
#define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY (1024*1024*15) /* 15MB */
-#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 16) /* 16MB */
+#define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */
#define NETVSC_INVALID_INDEX -1
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 5f91e3e01c04..aab2cf72d025 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -18,6 +18,7 @@
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
@@ -175,6 +176,7 @@ struct au1k_private {
struct resource *ioarea;
struct au1k_irda_platform_data *platdata;
+ struct clk *irda_clk;
};
static int qos_mtt_bits = 0x07; /* 1 ms or more */
@@ -514,9 +516,39 @@ static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
static int au1k_init(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
- u32 enable, ring_address;
+ u32 enable, ring_address, phyck;
+ struct clk *c;
int i;
+ c = clk_get(NULL, "irda_clk");
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ i = clk_prepare_enable(c);
+ if (i) {
+ clk_put(c);
+ return i;
+ }
+
+ switch (clk_get_rate(c)) {
+ case 40000000:
+ phyck = IR_PHYCLK_40MHZ;
+ break;
+ case 48000000:
+ phyck = IR_PHYCLK_48MHZ;
+ break;
+ case 56000000:
+ phyck = IR_PHYCLK_56MHZ;
+ break;
+ case 64000000:
+ phyck = IR_PHYCLK_64MHZ;
+ break;
+ default:
+ clk_disable_unprepare(c);
+ clk_put(c);
+ return -EINVAL;
+ }
+ aup->irda_clk = c;
+
enable = IR_HC | IR_CE | IR_C;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
enable |= IR_BE;
@@ -545,7 +577,7 @@ static int au1k_init(struct net_device *dev)
irda_write(aup, IR_RING_SIZE,
(RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));
- irda_write(aup, IR_CONFIG_2, IR_PHYCLK_48MHZ | IR_ONE_PIN);
+ irda_write(aup, IR_CONFIG_2, phyck | IR_ONE_PIN);
irda_write(aup, IR_RING_ADDR_CMPR, 0);
au1k_irda_set_speed(dev, 9600);
@@ -619,6 +651,9 @@ static int au1k_irda_stop(struct net_device *dev)
free_irq(aup->irq_tx, dev);
free_irq(aup->irq_rx, dev);
+ clk_disable_unprepare(aup->irda_clk);
+ clk_put(aup->irda_clk);
+
return 0;
}
@@ -853,6 +888,7 @@ static int au1k_irda_probe(struct platform_device *pdev)
struct au1k_private *aup;
struct net_device *dev;
struct resource *r;
+ struct clk *c;
int err;
dev = alloc_irdadev(sizeof(struct au1k_private));
@@ -886,6 +922,14 @@ static int au1k_irda_probe(struct platform_device *pdev)
if (!aup->ioarea)
goto out;
+ /* bail out early if clock doesn't exist */
+ c = clk_get(NULL, "irda_clk");
+ if (IS_ERR(c)) {
+ err = PTR_ERR(c);
+ goto out;
+ }
+ clk_put(c);
+
aup->iobase = ioremap_nocache(r->start, resource_size(r));
if (!aup->iobase)
goto out2;
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 485006604bbc..58ef59469dd0 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -485,13 +485,13 @@ static int vlsi_create_hwif(vlsi_irda_dev_t *idev)
idev->virtaddr = NULL;
idev->busaddr = 0;
- ringarea = pci_alloc_consistent(idev->pdev, HW_RING_AREA_SIZE, &idev->busaddr);
+ ringarea = pci_zalloc_consistent(idev->pdev, HW_RING_AREA_SIZE,
+ &idev->busaddr);
if (!ringarea) {
IRDA_ERROR("%s: insufficient memory for descriptor rings\n",
__func__);
goto out;
}
- memset(ringarea, 0, HW_RING_AREA_SIZE);
hwmap = (struct ring_descr_hw *)ringarea;
idev->rx_ring = vlsi_alloc_ring(idev->pdev, hwmap, ringsize[1],
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 9f194a0bef7c..37eed4d84e9c 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -5,8 +5,8 @@ comment "Host-side USB support is needed for USB Network Adapter support"
depends on !USB && NET
menuconfig USB_NET_DRIVERS
- bool "USB Network Adapters"
- default y
+ tristate "USB Network Adapters"
+ default USB if USB
depends on USB && NET
if USB_NET_DRIVERS
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b76f7dcde0db..d0db371c30a7 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -766,7 +766,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
gdesc->dword[3] = 0;
netdev_dbg(adapter->netdev,
- "txd[%u]: 0x%llu %u %u\n",
+ "txd[%u]: 0x%llx %u %u\n",
tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index b2137e8f7ca6..16604bdf5197 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -189,6 +189,7 @@ config USB_NET_RNDIS_WLAN
tristate "Wireless RNDIS USB support"
depends on USB
depends on CFG80211
+ select USB_NET_DRIVERS
select USB_USBNET
select USB_NET_CDCETHER
select USB_NET_RNDIS_HOST
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index dfc6dfc56d52..1ab8e500fb77 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -3449,8 +3449,9 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
return -ENOMEM;
for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
- v = pci_alloc_consistent(priv->pci_dev,
- sizeof(struct ipw2100_cmd_header), &p);
+ v = pci_zalloc_consistent(priv->pci_dev,
+ sizeof(struct ipw2100_cmd_header),
+ &p);
if (!v) {
printk(KERN_ERR DRV_NAME ": "
"%s: PCI alloc failed for msg "
@@ -3459,8 +3460,6 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
break;
}
- memset(v, 0, sizeof(struct ipw2100_cmd_header));
-
priv->msg_buffers[i].type = COMMAND;
priv->msg_buffers[i].info.c_struct.cmd =
(struct ipw2100_cmd_header *)v;
@@ -4336,16 +4335,12 @@ static int status_queue_allocate(struct ipw2100_priv *priv, int entries)
IPW_DEBUG_INFO("enter\n");
q->size = entries * sizeof(struct ipw2100_status);
- q->drv =
- (struct ipw2100_status *)pci_alloc_consistent(priv->pci_dev,
- q->size, &q->nic);
+ q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
if (!q->drv) {
IPW_DEBUG_WARNING("Can not allocate status queue.\n");
return -ENOMEM;
}
- memset(q->drv, 0, q->size);
-
IPW_DEBUG_INFO("exit\n");
return 0;
@@ -4374,13 +4369,12 @@ static int bd_queue_allocate(struct ipw2100_priv *priv,
q->entries = entries;
q->size = entries * sizeof(struct ipw2100_bd);
- q->drv = pci_alloc_consistent(priv->pci_dev, q->size, &q->nic);
+ q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
if (!q->drv) {
IPW_DEBUG_INFO
("can't allocate shared memory for buffer descriptors\n");
return -ENOMEM;
}
- memset(q->drv, 0, q->size);
IPW_DEBUG_INFO("exit\n");
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 9a3d4d6724f7..fc6cb215e761 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1159,12 +1159,11 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
size = MWL8K_RX_DESCS * priv->rxd_ops->rxd_size;
- rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
+ rxq->rxd = pci_zalloc_consistent(priv->pdev, size, &rxq->rxd_dma);
if (rxq->rxd == NULL) {
wiphy_err(hw->wiphy, "failed to alloc RX descriptors\n");
return -ENOMEM;
}
- memset(rxq->rxd, 0, size);
rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL);
if (rxq->buf == NULL) {
@@ -1451,12 +1450,11 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc);
- txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
+ txq->txd = pci_zalloc_consistent(priv->pdev, size, &txq->txd_dma);
if (txq->txd == NULL) {
wiphy_err(hw->wiphy, "failed to alloc TX descriptors\n");
return -ENOMEM;
}
- memset(txq->txd, 0, size);
txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
if (txq->skb == NULL) {
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 4b904f708184..fcc45e5bf50a 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -972,16 +972,13 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
else
priv->rx_ring_sz = sizeof(struct rtl8180_rx_desc);
- priv->rx_ring = pci_alloc_consistent(priv->pdev,
- priv->rx_ring_sz * 32,
- &priv->rx_ring_dma);
-
+ priv->rx_ring = pci_zalloc_consistent(priv->pdev, priv->rx_ring_sz * 32,
+ &priv->rx_ring_dma);
if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
wiphy_err(dev->wiphy, "Cannot allocate RX ring\n");
return -ENOMEM;
}
- memset(priv->rx_ring, 0, priv->rx_ring_sz * 32);
priv->rx_idx = 0;
for (i = 0; i < 32; i++) {
@@ -1040,14 +1037,14 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev,
dma_addr_t dma;
int i;
- ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
+ ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries,
+ &dma);
if (!ring || (unsigned long)ring & 0xFF) {
wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n",
prio);
return -ENOMEM;
}
- memset(ring, 0, sizeof(*ring)*entries);
priv->tx_ring[prio].desc = ring;
priv->tx_ring[prio].dma = dma;
priv->tx_ring[prio].idx = 0;
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index dae55257f0e8..67d1ee6edcad 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1092,16 +1092,14 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
u32 nextdescaddress;
int i;
- ring = pci_alloc_consistent(rtlpci->pdev,
- sizeof(*ring) * entries, &dma);
-
+ ring = pci_zalloc_consistent(rtlpci->pdev, sizeof(*ring) * entries,
+ &dma);
if (!ring || (unsigned long)ring & 0xFF) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Cannot allocate TX ring (prio = %d)\n", prio);
return -ENOMEM;
}
- memset(ring, 0, sizeof(*ring) * entries);
rtlpci->tx_ring[prio].desc = ring;
rtlpci->tx_ring[prio].dma = dma;
rtlpci->tx_ring[prio].idx = 0;
@@ -1139,10 +1137,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
rx_queue_idx++) {
rtlpci->rx_ring[rx_queue_idx].desc =
- pci_alloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rx_queue_idx].
- desc) * rtlpci->rxringcount,
- &rtlpci->rx_ring[rx_queue_idx].dma);
+ pci_zalloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) * rtlpci->rxringcount,
+ &rtlpci->rx_ring[rx_queue_idx].dma);
if (!rtlpci->rx_ring[rx_queue_idx].desc ||
(unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
@@ -1151,10 +1148,6 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
return -ENOMEM;
}
- memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
- sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
- rtlpci->rxringcount);
-
rtlpci->rx_ring[rx_queue_idx].idx = 0;
/* If amsdu_8k is disabled, set buffersize to 4096. This
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index c864f82bd37d..30e981be14c2 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -2204,7 +2204,7 @@ static int __init parport_ip32_init(void)
{
pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
this_port = parport_ip32_probe_port();
- return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
+ return PTR_ERR_OR_ZERO(this_port);
}
/**
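The parport_ip32 change is a pure simplification: PTR_ERR_OR_ZERO(ptr) returns PTR_ERR(ptr) when the pointer encodes an error and 0 otherwise, which is exactly what the open-coded conditional did. A short, self-contained sketch of the idiom (the wrapper function is hypothetical):

    #include <linux/err.h>

    /*
     * Illustrative only: PTR_ERR_OR_ZERO() folds the common
     * "return IS_ERR(p) ? PTR_ERR(p) : 0;" pattern into a single call.
     */
    static inline int ptr_to_retval(const void *p)
    {
            return PTR_ERR_OR_ZERO(p);      /* 0 on success, -errno if ERR_PTR() */
    }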
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 21df477be0c8..2d8a4d05d78f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -46,4 +46,12 @@ config PCI_HOST_GENERIC
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
+config PCIE_SPEAR13XX
+ tristate "STMicroelectronics SPEAr PCIe controller"
+ depends on ARCH_SPEAR13XX
+ select PCIEPORTBUS
+ select PCIE_DW
+ help
+ Say Y here if you want PCIe support on SPEAr13XX SoCs.
+
endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 611ba4b48c94..0daec7941aba 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o
obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
+obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index c284e841e3ea..abd65784618d 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -41,11 +41,12 @@
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
-#include <linux/tegra-cpuidle.h>
-#include <linux/tegra-powergate.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>
+#include <soc/tegra/cpuidle.h>
+#include <soc/tegra/pmc.h>
+
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>
@@ -233,7 +234,6 @@ struct tegra_pcie_soc_data {
bool has_pex_clkreq_en;
bool has_pex_bias_ctrl;
bool has_intr_prsnt_sense;
- bool has_avdd_supply;
bool has_cml_clk;
};
@@ -272,9 +272,8 @@ struct tegra_pcie {
unsigned int num_ports;
u32 xbar_config;
- struct regulator *pex_clk_supply;
- struct regulator *vdd_supply;
- struct regulator *avdd_supply;
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
const struct tegra_pcie_soc_data *soc_data;
};
@@ -894,7 +893,6 @@ static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
- const struct tegra_pcie_soc_data *soc = pcie->soc_data;
int err;
/* TODO: disable and unprepare clocks? */
@@ -905,23 +903,9 @@ static void tegra_pcie_power_off(struct tegra_pcie *pcie)
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
- if (soc->has_avdd_supply) {
- err = regulator_disable(pcie->avdd_supply);
- if (err < 0)
- dev_warn(pcie->dev,
- "failed to disable AVDD regulator: %d\n",
- err);
- }
-
- err = regulator_disable(pcie->pex_clk_supply);
- if (err < 0)
- dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
- err);
-
- err = regulator_disable(pcie->vdd_supply);
+ err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
if (err < 0)
- dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
- err);
+ dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
}
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
@@ -936,28 +920,9 @@ static int tegra_pcie_power_on(struct tegra_pcie *pcie)
tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
/* enable regulators */
- err = regulator_enable(pcie->vdd_supply);
- if (err < 0) {
- dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
- return err;
- }
-
- err = regulator_enable(pcie->pex_clk_supply);
- if (err < 0) {
- dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
- err);
- return err;
- }
-
- if (soc->has_avdd_supply) {
- err = regulator_enable(pcie->avdd_supply);
- if (err < 0) {
- dev_err(pcie->dev,
- "failed to enable AVDD regulator: %d\n",
- err);
- return err;
- }
- }
+ err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
+ if (err < 0)
+ dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
pcie->pex_clk,
@@ -1394,14 +1359,157 @@ static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
return -EINVAL;
}
+/*
+ * Check whether a given set of supplies is available in a device tree node.
+ * This is used to check whether the new or the legacy device tree bindings
+ * should be used.
+ */
+static bool of_regulator_bulk_available(struct device_node *np,
+ struct regulator_bulk_data *supplies,
+ unsigned int num_supplies)
+{
+ char property[32];
+ unsigned int i;
+
+ for (i = 0; i < num_supplies; i++) {
+ snprintf(property, 32, "%s-supply", supplies[i].supply);
+
+ if (of_find_property(np, property, NULL) == NULL)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Old versions of the device tree binding for this device used a set of power
+ * supplies that didn't match the hardware inputs. This happened to work for a
+ * number of cases but is not future-proof. However, to preserve backward
+ * compatibility with old device trees, this function will try to use the old
+ * set of supplies.
+ */
+static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
+{
+ struct device_node *np = pcie->dev->of_node;
+
+ if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
+ pcie->num_supplies = 3;
+ else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
+ pcie->num_supplies = 2;
+
+ if (pcie->num_supplies == 0) {
+ dev_err(pcie->dev, "device %s not supported in legacy mode\n",
+ np->full_name);
+ return -ENODEV;
+ }
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[0].supply = "pex-clk";
+ pcie->supplies[1].supply = "vdd";
+
+ if (pcie->num_supplies > 2)
+ pcie->supplies[2].supply = "avdd";
+
+ return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
+ pcie->supplies);
+}
+
+/*
+ * Obtains the list of regulators required for a particular generation of the
+ * IP block.
+ *
+ * This would've been nice to do simply by providing static tables for use
+ * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
+ * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB),
+ * and either pair may be optional depending on which ports are being used.
+ */
+static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
+{
+ struct device_node *np = pcie->dev->of_node;
+ unsigned int i = 0;
+
+ if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+ bool need_pexa = false, need_pexb = false;
+
+ /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
+ if (lane_mask & 0x0f)
+ need_pexa = true;
+
+ /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
+ if (lane_mask & 0x30)
+ need_pexb = true;
+
+ pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
+ (need_pexb ? 2 : 0);
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[i++].supply = "avdd-pex-pll";
+ pcie->supplies[i++].supply = "hvdd-pex";
+ pcie->supplies[i++].supply = "vddio-pex-ctl";
+ pcie->supplies[i++].supply = "avdd-plle";
+
+ if (need_pexa) {
+ pcie->supplies[i++].supply = "avdd-pexa";
+ pcie->supplies[i++].supply = "vdd-pexa";
+ }
+
+ if (need_pexb) {
+ pcie->supplies[i++].supply = "avdd-pexb";
+ pcie->supplies[i++].supply = "vdd-pexb";
+ }
+ } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
+ pcie->num_supplies = 5;
+
+ pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+ sizeof(*pcie->supplies),
+ GFP_KERNEL);
+ if (!pcie->supplies)
+ return -ENOMEM;
+
+ pcie->supplies[0].supply = "avdd-pex";
+ pcie->supplies[1].supply = "vdd-pex";
+ pcie->supplies[2].supply = "avdd-pex-pll";
+ pcie->supplies[3].supply = "avdd-plle";
+ pcie->supplies[4].supply = "vddio-pex-clk";
+ }
+
+ if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
+ pcie->num_supplies))
+ return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
+ pcie->supplies);
+
+ /*
+ * If not all regulators are available for this new scheme, assume
+ * that the device tree complies with an older version of the device
+ * tree binding.
+ */
+ dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
+
+ devm_kfree(pcie->dev, pcie->supplies);
+ pcie->num_supplies = 0;
+
+ return tegra_pcie_get_legacy_regulators(pcie);
+}
+
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
const struct tegra_pcie_soc_data *soc = pcie->soc_data;
struct device_node *np = pcie->dev->of_node, *port;
struct of_pci_range_parser parser;
struct of_pci_range range;
+ u32 lanes = 0, mask = 0;
+ unsigned int lane = 0;
struct resource res;
- u32 lanes = 0;
int err;
if (of_pci_range_parser_init(&parser, np)) {
@@ -1409,20 +1517,6 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
return -EINVAL;
}
- pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
- if (IS_ERR(pcie->vdd_supply))
- return PTR_ERR(pcie->vdd_supply);
-
- pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
- if (IS_ERR(pcie->pex_clk_supply))
- return PTR_ERR(pcie->pex_clk_supply);
-
- if (soc->has_avdd_supply) {
- pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
- if (IS_ERR(pcie->avdd_supply))
- return PTR_ERR(pcie->avdd_supply);
- }
-
for_each_of_pci_range(&parser, &range) {
of_pci_range_to_resource(&range, np, &res);
@@ -1490,8 +1584,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
lanes |= value << (index << 3);
- if (!of_device_is_available(port))
+ if (!of_device_is_available(port)) {
+ lane += value;
continue;
+ }
+
+ mask |= ((1 << value) - 1) << lane;
+ lane += value;
rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
if (!rp)
@@ -1522,6 +1621,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
return err;
}
+ err = tegra_pcie_get_regulators(pcie, mask);
+ if (err < 0)
+ return err;
+
return 0;
}
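The new lane-mask bookkeeping above feeds tegra_pcie_get_regulators(): each root port contributes a contiguous run of bits equal to its lane count, but only when the port is enabled in the device tree, while the lane counter always advances. A worked example with an assumed configuration (not taken from the patch):

    /*
     * Illustrative only: port 0 has 4 lanes and is enabled,
     * port 1 has 2 lanes but is disabled in the device tree.
     */
    unsigned int lane = 0;
    u32 mask = 0;

    /* port 0: enabled, value = 4 */
    mask |= ((1 << 4) - 1) << lane;     /* mask = 0x0f (lanes 0-3) */
    lane += 4;

    /* port 1: disabled, value = 2 -> only advance the lane counter */
    lane += 2;                          /* lane = 6, mask unchanged */

    /* mask & 0x0f -> PEXA supplies needed; mask & 0x30 == 0 -> PEXB skipped */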
@@ -1615,7 +1718,6 @@ static const struct tegra_pcie_soc_data tegra20_pcie_data = {
.has_pex_clkreq_en = false,
.has_pex_bias_ctrl = false,
.has_intr_prsnt_sense = false,
- .has_avdd_supply = false,
.has_cml_clk = false,
};
@@ -1627,7 +1729,6 @@ static const struct tegra_pcie_soc_data tegra30_pcie_data = {
.has_pex_clkreq_en = true,
.has_pex_bias_ctrl = true,
.has_intr_prsnt_sense = true,
- .has_avdd_supply = true,
.has_cml_clk = true,
};
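The net effect of the pci-tegra.c changes is that three hand-managed struct regulator pointers become one dynamically sized regulator_bulk_data array, so power-on and power-off reduce to single regulator_bulk_enable()/regulator_bulk_disable() calls. A minimal sketch of the bulk API is shown below; the supply names are placeholders, not the Tegra bindings.

    #include <linux/regulator/consumer.h>

    /*
     * Illustrative only: acquire and enable a fixed set of supplies in one
     * shot. The array must outlive the devm-managed handles, hence static.
     */
    static int enable_board_supplies(struct device *dev)
    {
            static struct regulator_bulk_data supplies[] = {
                    { .supply = "vdd" },
                    { .supply = "avdd" },
            };
            int err;

            err = devm_regulator_bulk_get(dev, ARRAY_SIZE(supplies), supplies);
            if (err < 0)
                    return err;

            /* enables every regulator, rolling back automatically on failure */
            return regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
    }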
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
new file mode 100644
index 000000000000..6dea9e43a75c
--- /dev/null
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -0,0 +1,393 @@
+/*
+ * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs
+ *
+ * SPEAr13xx PCIe Glue Layer Source Code
+ *
+ * Copyright (C) 2010-2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@st.com>
+ * Mohit Kumar <mohit.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pci.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+
+#include "pcie-designware.h"
+
+struct spear13xx_pcie {
+ void __iomem *app_base;
+ struct phy *phy;
+ struct clk *clk;
+ struct pcie_port pp;
+ bool is_gen1;
+};
+
+struct pcie_app_reg {
+ u32 app_ctrl_0; /* cr0 */
+ u32 app_ctrl_1; /* cr1 */
+ u32 app_status_0; /* cr2 */
+ u32 app_status_1; /* cr3 */
+ u32 msg_status; /* cr4 */
+ u32 msg_payload; /* cr5 */
+ u32 int_sts; /* cr6 */
+ u32 int_clr; /* cr7 */
+ u32 int_mask; /* cr8 */
+ u32 mst_bmisc; /* cr9 */
+ u32 phy_ctrl; /* cr10 */
+ u32 phy_status; /* cr11 */
+ u32 cxpl_debug_info_0; /* cr12 */
+ u32 cxpl_debug_info_1; /* cr13 */
+ u32 ven_msg_ctrl_0; /* cr14 */
+ u32 ven_msg_ctrl_1; /* cr15 */
+ u32 ven_msg_data_0; /* cr16 */
+ u32 ven_msg_data_1; /* cr17 */
+ u32 ven_msi_0; /* cr18 */
+ u32 ven_msi_1; /* cr19 */
+ u32 mst_rmisc; /* cr20 */
+};
+
+/* CR0 ID */
+#define RX_LANE_FLIP_EN_ID 0
+#define TX_LANE_FLIP_EN_ID 1
+#define SYS_AUX_PWR_DET_ID 2
+#define APP_LTSSM_ENABLE_ID 3
+#define SYS_ATTEN_BUTTON_PRESSED_ID 4
+#define SYS_MRL_SENSOR_STATE_ID 5
+#define SYS_PWR_FAULT_DET_ID 6
+#define SYS_MRL_SENSOR_CHGED_ID 7
+#define SYS_PRE_DET_CHGED_ID 8
+#define SYS_CMD_CPLED_INT_ID 9
+#define APP_INIT_RST_0_ID 11
+#define APP_REQ_ENTR_L1_ID 12
+#define APP_READY_ENTR_L23_ID 13
+#define APP_REQ_EXIT_L1_ID 14
+#define DEVICE_TYPE_EP (0 << 25)
+#define DEVICE_TYPE_LEP (1 << 25)
+#define DEVICE_TYPE_RC (4 << 25)
+#define SYS_INT_ID 29
+#define MISCTRL_EN_ID 30
+#define REG_TRANSLATION_ENABLE 31
+
+/* CR1 ID */
+#define APPS_PM_XMT_TURNOFF_ID 2
+#define APPS_PM_XMT_PME_ID 5
+
+/* CR3 ID */
+#define XMLH_LTSSM_STATE_DETECT_QUIET 0x00
+#define XMLH_LTSSM_STATE_DETECT_ACT 0x01
+#define XMLH_LTSSM_STATE_POLL_ACTIVE 0x02
+#define XMLH_LTSSM_STATE_POLL_COMPLIANCE 0x03
+#define XMLH_LTSSM_STATE_POLL_CONFIG 0x04
+#define XMLH_LTSSM_STATE_PRE_DETECT_QUIET 0x05
+#define XMLH_LTSSM_STATE_DETECT_WAIT 0x06
+#define XMLH_LTSSM_STATE_CFG_LINKWD_START 0x07
+#define XMLH_LTSSM_STATE_CFG_LINKWD_ACEPT 0x08
+#define XMLH_LTSSM_STATE_CFG_LANENUM_WAIT 0x09
+#define XMLH_LTSSM_STATE_CFG_LANENUM_ACEPT 0x0A
+#define XMLH_LTSSM_STATE_CFG_COMPLETE 0x0B
+#define XMLH_LTSSM_STATE_CFG_IDLE 0x0C
+#define XMLH_LTSSM_STATE_RCVRY_LOCK 0x0D
+#define XMLH_LTSSM_STATE_RCVRY_SPEED 0x0E
+#define XMLH_LTSSM_STATE_RCVRY_RCVRCFG 0x0F
+#define XMLH_LTSSM_STATE_RCVRY_IDLE 0x10
+#define XMLH_LTSSM_STATE_L0 0x11
+#define XMLH_LTSSM_STATE_L0S 0x12
+#define XMLH_LTSSM_STATE_L123_SEND_EIDLE 0x13
+#define XMLH_LTSSM_STATE_L1_IDLE 0x14
+#define XMLH_LTSSM_STATE_L2_IDLE 0x15
+#define XMLH_LTSSM_STATE_L2_WAKE 0x16
+#define XMLH_LTSSM_STATE_DISABLED_ENTRY 0x17
+#define XMLH_LTSSM_STATE_DISABLED_IDLE 0x18
+#define XMLH_LTSSM_STATE_DISABLED 0x19
+#define XMLH_LTSSM_STATE_LPBK_ENTRY 0x1A
+#define XMLH_LTSSM_STATE_LPBK_ACTIVE 0x1B
+#define XMLH_LTSSM_STATE_LPBK_EXIT 0x1C
+#define XMLH_LTSSM_STATE_LPBK_EXIT_TIMEOUT 0x1D
+#define XMLH_LTSSM_STATE_HOT_RESET_ENTRY 0x1E
+#define XMLH_LTSSM_STATE_HOT_RESET 0x1F
+#define XMLH_LTSSM_STATE_MASK 0x3F
+#define XMLH_LINK_UP (1 << 6)
+
+/* CR4 ID */
+#define CFG_MSI_EN_ID 18
+
+/* CR6 */
+#define INTA_CTRL_INT (1 << 7)
+#define INTB_CTRL_INT (1 << 8)
+#define INTC_CTRL_INT (1 << 9)
+#define INTD_CTRL_INT (1 << 10)
+#define MSI_CTRL_INT (1 << 26)
+
+/* CR19 ID */
+#define VEN_MSI_REQ_ID 11
+#define VEN_MSI_FUN_NUM_ID 8
+#define VEN_MSI_TC_ID 5
+#define VEN_MSI_VECTOR_ID 0
+#define VEN_MSI_REQ_EN ((u32)0x1 << VEN_MSI_REQ_ID)
+#define VEN_MSI_FUN_NUM_MASK ((u32)0x7 << VEN_MSI_FUN_NUM_ID)
+#define VEN_MSI_TC_MASK ((u32)0x7 << VEN_MSI_TC_ID)
+#define VEN_MSI_VECTOR_MASK ((u32)0x1F << VEN_MSI_VECTOR_ID)
+
+#define EXP_CAP_ID_OFFSET 0x70
+
+#define to_spear13xx_pcie(x) container_of(x, struct spear13xx_pcie, pp)
+
+static int spear13xx_pcie_establish_link(struct pcie_port *pp)
+{
+ u32 val;
+ int count = 0;
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ u32 exp_cap_off = EXP_CAP_ID_OFFSET;
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "link already up\n");
+ return 0;
+ }
+
+ dw_pcie_setup_rc(pp);
+
+ /*
+ * This controller supports only a 128-byte read request size, but the
+ * default value in the capability register is 512 bytes, so force it
+ * to 128 here.
+ */
+ dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, &val);
+ val &= ~PCI_EXP_DEVCTL_READRQ;
+ dw_pcie_cfg_write(pp->dbi_base, exp_cap_off + PCI_EXP_DEVCTL, 4, val);
+
+ dw_pcie_cfg_write(pp->dbi_base, PCI_VENDOR_ID, 2, 0x104A);
+ dw_pcie_cfg_write(pp->dbi_base, PCI_DEVICE_ID, 2, 0xCD80);
+
+ /*
+ * If is_gen1 is set, force the link to Gen1 operation so that some
+ * buggy cards also work.
+ */
+ if (spear13xx_pcie->is_gen1) {
+ dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCAP, 4,
+ &val);
+ if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
+ PCI_EXP_LNKCAP, 4, val);
+ }
+
+ dw_pcie_cfg_read(pp->dbi_base, exp_cap_off + PCI_EXP_LNKCTL2, 4,
+ &val);
+ if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
+ val &= ~((u32)PCI_EXP_LNKCAP_SLS);
+ val |= PCI_EXP_LNKCAP_SLS_2_5GB;
+ dw_pcie_cfg_write(pp->dbi_base, exp_cap_off +
+ PCI_EXP_LNKCTL2, 4, val);
+ }
+ }
+
+ /* enable ltssm */
+ writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
+ | (1 << APP_LTSSM_ENABLE_ID)
+ | ((u32)1 << REG_TRANSLATION_ENABLE),
+ &app_reg->app_ctrl_0);
+
+ /* check if the link is up or not */
+ while (!dw_pcie_link_up(pp)) {
+ mdelay(100);
+ count++;
+ if (count == 10) {
+ dev_err(pp->dev, "link Fail\n");
+ return -EINVAL;
+ }
+ }
+ dev_info(pp->dev, "link up\n");
+
+ return 0;
+}
+
+static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ unsigned int status;
+
+ status = readl(&app_reg->int_sts);
+
+ if (status & MSI_CTRL_INT) {
+ if (!IS_ENABLED(CONFIG_PCI_MSI))
+ BUG();
+ dw_handle_msi_irq(pp);
+ }
+
+ writel(status, &app_reg->int_clr);
+
+ return IRQ_HANDLED;
+}
+
+static void spear13xx_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+ /* Enable MSI interrupt */
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ dw_pcie_msi_init(pp);
+ writel(readl(&app_reg->int_mask) |
+ MSI_CTRL_INT, &app_reg->int_mask);
+ }
+}
+
+static int spear13xx_pcie_link_up(struct pcie_port *pp)
+{
+ struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp);
+ struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+
+ if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
+ return 1;
+
+ return 0;
+}
+
+static void spear13xx_pcie_host_init(struct pcie_port *pp)
+{
+ spear13xx_pcie_establish_link(pp);
+ spear13xx_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops spear13xx_pcie_host_ops = {
+ .link_up = spear13xx_pcie_link_up,
+ .host_init = spear13xx_pcie_host_init,
+};
+
+static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 0);
+ if (!pp->irq) {
+ dev_err(dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+ ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
+ IRQF_SHARED, "spear1340-pcie", pp);
+ if (ret) {
+ dev_err(dev, "failed to request irq %d\n", pp->irq);
+ return ret;
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &spear13xx_pcie_host_ops;
+
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init spear13xx_pcie_probe(struct platform_device *pdev)
+{
+ struct spear13xx_pcie *spear13xx_pcie;
+ struct pcie_port *pp;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *dbi_base;
+ int ret;
+
+ spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL);
+ if (!spear13xx_pcie) {
+ dev_err(dev, "no memory for SPEAr13xx pcie\n");
+ return -ENOMEM;
+ }
+
+ spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(spear13xx_pcie->phy)) {
+ ret = PTR_ERR(spear13xx_pcie->phy);
+ if (ret == -EPROBE_DEFER)
+ dev_info(dev, "probe deferred\n");
+ else
+ dev_err(dev, "couldn't get pcie-phy\n");
+ return ret;
+ }
+
+ phy_init(spear13xx_pcie->phy);
+
+ spear13xx_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(spear13xx_pcie->clk)) {
+ dev_err(dev, "couldn't get clk for pcie\n");
+ return PTR_ERR(spear13xx_pcie->clk);
+ }
+ ret = clk_prepare_enable(spear13xx_pcie->clk);
+ if (ret) {
+ dev_err(dev, "couldn't enable clk for pcie\n");
+ return ret;
+ }
+
+ pp = &spear13xx_pcie->pp;
+
+ pp->dev = dev;
+
+ dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
+ if (IS_ERR(pp->dbi_base)) {
+ dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
+ ret = PTR_ERR(pp->dbi_base);
+ goto fail_clk;
+ }
+ spear13xx_pcie->app_base = pp->dbi_base + 0x2000;
+
+ if (of_property_read_bool(np, "st,pcie-is-gen1"))
+ spear13xx_pcie->is_gen1 = true;
+
+ ret = add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto fail_clk;
+
+ platform_set_drvdata(pdev, spear13xx_pcie);
+ return 0;
+
+fail_clk:
+ clk_disable_unprepare(spear13xx_pcie->clk);
+
+ return ret;
+}
+
+static const struct of_device_id spear13xx_pcie_of_match[] = {
+ { .compatible = "st,spear1340-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spear13xx_pcie_of_match);
+
+static struct platform_driver spear13xx_pcie_driver __initdata = {
+ .probe = spear13xx_pcie_probe,
+ .driver = {
+ .name = "spear-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spear13xx_pcie_of_match),
+ },
+};
+
+/* SPEAr13xx PCIe driver does not allow module unload */
+
+static int __init pcie_init(void)
+{
+ return platform_driver_register(&spear13xx_pcie_driver);
+}
+module_init(pcie_init);
+
+MODULE_DESCRIPTION("ST Microelectronics SPEAr13xx PCIe host controller driver");
+MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
+MODULE_LICENSE("GPL v2");
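The new SPEAr13xx glue embeds its struct pcie_port inside the driver-private structure and recovers the private data with the to_spear13xx_pcie() container_of() wrapper; link training is then a bounded polling loop (10 iterations of 100 ms) on dw_pcie_link_up(). A reduced sketch of that pattern, assuming the DesignWare core helpers used by this file; it mirrors the structure of the driver and is not a drop-in replacement:

    /*
     * Illustrative only: container_of() back-pointer plus a bounded link
     * poll, in the style of spear13xx_pcie_establish_link().
     */
    static int wait_for_link(struct pcie_port *pp)
    {
            struct spear13xx_pcie *priv = to_spear13xx_pcie(pp);
            int retries;

            for (retries = 0; retries < 10; retries++) {
                    if (dw_pcie_link_up(pp))
                            return 0;       /* link trained */
                    mdelay(100);            /* up to ~1 s total */
            }

            dev_err(pp->dev, "link never came up (app_base %p)\n",
                    priv->app_base);
            return -ETIMEDOUT;
    }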
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 602d153c7055..70741c8c46a0 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -80,8 +80,9 @@ static struct acpiphp_context *acpiphp_init_context(struct acpi_device *adev)
return NULL;
context->refcount = 1;
- acpi_set_hp_context(adev, &context->hp, acpiphp_hotplug_notify, NULL,
- acpiphp_post_dock_fixup);
+ context->hp.notify = acpiphp_hotplug_notify;
+ context->hp.fixup = acpiphp_post_dock_fixup;
+ acpi_set_hp_context(adev, &context->hp);
return context;
}
@@ -369,20 +370,6 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
return AE_OK;
}
-static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
-{
- struct acpiphp_bridge *bridge = NULL;
-
- acpi_lock_hp_context();
- if (adev->hp) {
- bridge = to_acpiphp_root_context(adev->hp)->root_bridge;
- if (bridge)
- get_bridge(bridge);
- }
- acpi_unlock_hp_context();
- return bridge;
-}
-
static void cleanup_bridge(struct acpiphp_bridge *bridge)
{
struct acpiphp_slot *slot;
@@ -753,9 +740,15 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
void acpiphp_check_host_bridge(struct acpi_device *adev)
{
- struct acpiphp_bridge *bridge;
+ struct acpiphp_bridge *bridge = NULL;
- bridge = acpiphp_dev_to_bridge(adev);
+ acpi_lock_hp_context();
+ if (adev->hp) {
+ bridge = to_acpiphp_root_context(adev->hp)->root_bridge;
+ if (bridge)
+ get_bridge(bridge);
+ }
+ acpi_unlock_hp_context();
if (bridge) {
pci_lock_rescan_remove();
@@ -884,7 +877,7 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
goto err;
root_context->root_bridge = bridge;
- acpi_set_hp_context(adev, &root_context->hp, NULL, NULL, NULL);
+ acpi_set_hp_context(adev, &root_context->hp);
} else {
struct acpiphp_context *context;
@@ -927,7 +920,7 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
kfree(bridge);
}
-void acpiphp_drop_bridge(struct acpiphp_bridge *bridge)
+static void acpiphp_drop_bridge(struct acpiphp_bridge *bridge)
{
if (pci_is_root_bus(bridge->pci_bus)) {
struct acpiphp_root_context *root_context;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index d1332d2f8730..d77e46bca54c 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -7,8 +7,8 @@
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
-#define COMPONENT "zPCI hpc"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
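The s390 hotplug change only renames the prefix macro, but it relies on the kernel convention that a pr_fmt() defined before the printk headers are pulled in is prepended to every pr_*() call in the file. A minimal sketch:

    /*
     * Illustrative only: pr_fmt() must be defined before <linux/kernel.h>
     * (which drags in printk.h); every pr_*() in the file then carries
     * the prefix automatically.
     */
    #define KMSG_COMPONENT "zpci"
    #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

    #include <linux/kernel.h>

    static void report_slot(int slot)
    {
            pr_info("slot %d enabled\n", slot);     /* "zpci: slot 3 enabled" */
    }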
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index ca4927ba8433..37263b0ebfe3 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -18,31 +18,31 @@
#include "pci.h"
/**
- * pci_acpi_wake_bus - Wake-up notification handler for root buses.
- * @handle: ACPI handle of a device the notification is for.
- * @event: Type of the signaled event.
- * @context: PCI root bus to wake up devices on.
+ * pci_acpi_wake_bus - Root bus wakeup notification work function.
+ * @work: Work item to handle.
*/
-static void pci_acpi_wake_bus(acpi_handle handle, u32 event, void *context)
+static void pci_acpi_wake_bus(struct work_struct *work)
{
- struct pci_bus *pci_bus = context;
+ struct acpi_device *adev;
+ struct acpi_pci_root *root;
- if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_bus)
- pci_pme_wakeup_bus(pci_bus);
+ adev = container_of(work, struct acpi_device, wakeup.context.work);
+ root = acpi_driver_data(adev);
+ pci_pme_wakeup_bus(root->bus);
}
/**
- * pci_acpi_wake_dev - Wake-up notification handler for PCI devices.
+ * pci_acpi_wake_dev - PCI device wakeup notification work function.
* @handle: ACPI handle of a device the notification is for.
- * @event: Type of the signaled event.
- * @context: PCI device object to wake up.
+ * @work: Work item to handle.
*/
-static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
+static void pci_acpi_wake_dev(struct work_struct *work)
{
- struct pci_dev *pci_dev = context;
+ struct acpi_device_wakeup_context *context;
+ struct pci_dev *pci_dev;
- if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
- return;
+ context = container_of(work, struct acpi_device_wakeup_context, work);
+ pci_dev = to_pci_dev(context->dev);
if (pci_dev->pme_poll)
pci_dev->pme_poll = false;
@@ -65,23 +65,12 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
}
/**
- * pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus.
- * @dev: ACPI device to add the notifier for.
- * @pci_bus: PCI bus to walk checking for PME status if an event is signaled.
- */
-acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
- struct pci_bus *pci_bus)
-{
- return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
-}
-
-/**
- * pci_acpi_remove_bus_pm_notifier - Unregister PCI bus PM notifier.
- * @dev: ACPI device to remove the notifier from.
+ * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
+ * @dev: PCI root bridge ACPI device.
*/
-acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
+acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
- return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus);
+ return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}
/**
@@ -92,16 +81,7 @@ acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
struct pci_dev *pci_dev)
{
- return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
-}
-
-/**
- * pci_acpi_remove_pm_notifier - Unregister PCI device PM notifier.
- * @dev: ACPI device to remove the notifier from.
- */
-acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
-{
- return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev);
+ return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
@@ -170,14 +150,13 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
-
- return handle ? acpi_bus_power_manageable(handle) : false;
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ return adev ? acpi_device_power_manageable(adev) : false;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
static const u8 state_conv[] = {
[PCI_D0] = ACPI_STATE_D0,
[PCI_D1] = ACPI_STATE_D1,
@@ -188,7 +167,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
int error = -EINVAL;
/* If the ACPI device has _EJ0, ignore the device */
- if (!handle || acpi_has_method(handle, "_EJ0"))
+ if (!adev || acpi_has_method(adev->handle, "_EJ0"))
return -ENODEV;
switch (state) {
@@ -202,7 +181,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
case PCI_D1:
case PCI_D2:
case PCI_D3hot:
- error = acpi_bus_set_power(handle, state_conv[state]);
+ error = acpi_device_set_power(adev, state_conv[state]);
}
if (!error)
@@ -214,9 +193,8 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
-
- return handle ? acpi_bus_can_wakeup(handle) : false;
+ struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ return adev ? acpi_device_can_wakeup(adev) : false;
}
static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
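The pci-acpi.c rework turns the wakeup callbacks from ACPI notify handlers into work functions: the work_struct is embedded in the wakeup context, and the handler recovers its surroundings with container_of() instead of receiving a context pointer. It also moves from raw ACPI handles to ACPI_COMPANION() and struct acpi_device accessors. A reduced sketch of the container_of()-on-work_struct idiom, with a made-up context type rather than the kernel's acpi_device_wakeup_context:

    #include <linux/device.h>
    #include <linux/workqueue.h>

    /*
     * Illustrative only: recover the enclosing context from an embedded
     * work_struct. my_wakeup_ctx is a placeholder, not a kernel type.
     */
    struct my_wakeup_ctx {
            struct device *dev;
            struct work_struct work;
    };

    static void my_wakeup_fn(struct work_struct *work)
    {
            struct my_wakeup_ctx *ctx =
                    container_of(work, struct my_wakeup_ctx, work);

            dev_info(ctx->dev, "wakeup work ran\n");
    }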
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 51cf8083b299..b0ce7cdee0c2 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -144,16 +144,6 @@ config TCIC
"Bridge" is the name used for the hardware inside your computer that
PCMCIA cards are plugged into. If unsure, say N.
-config PCMCIA_M8XX
- tristate "MPC8xx PCMCIA support"
- depends on PCCARD && PPC && 8xx
- select PCCARD_IODYN if PCMCIA != n
- help
- Say Y here to include support for PowerPC 8xx series PCMCIA
- controller.
-
- This driver is also available as a module called m8xx_pcmcia.
-
config PCMCIA_ALCHEMY_DEVBOARD
tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
depends on MIPS_ALCHEMY && PCMCIA
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index fd55a6951402..27e94b30cf96 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_PD6729) += pd6729.o
obj-$(CONFIG_I82365) += i82365.o
obj-$(CONFIG_I82092) += i82092.o
obj-$(CONFIG_TCIC) += tcic.o
-obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o
obj-$(CONFIG_PCMCIA_SOC_COMMON) += soc_common.o
obj-$(CONFIG_PCMCIA_SA11XX_BASE) += sa11xx_base.o
obj-$(CONFIG_PCMCIA_SA1100) += sa1100_cs.o
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
deleted file mode 100644
index 182034d2ef58..000000000000
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ /dev/null
@@ -1,1168 +0,0 @@
-/*
- * m8xx_pcmcia.c - Linux PCMCIA socket driver for the mpc8xx series.
- *
- * (C) 1999-2000 Magnus Damm <damm@opensource.se>
- * (C) 2001-2002 Montavista Software, Inc.
- * <mlocke@mvista.com>
- *
- * Support for two slots by Cyclades Corporation
- * <oliver.kurth@cyclades.de>
- * Further fixes, v2.6 kernel port
- * <marcelo.tosatti@cyclades.com>
- *
- * Some fixes, additions (C) 2005-2007 Montavista Software, Inc.
- * <vbordug@ru.mvista.com>
- *
- * "The ExCA standard specifies that socket controllers should provide
- * two IO and five memory windows per socket, which can be independently
- * configured and positioned in the host address space and mapped to
- * arbitrary segments of card address space. " - David A Hinds. 1999
- *
- * This controller does _not_ meet the ExCA standard.
- *
- * m8xx pcmcia controller brief info:
- * + 8 windows (attrib, mem, i/o)
- * + up to two slots (SLOT_A and SLOT_B)
- * + inputpins, outputpins, event and mask registers.
- * - no offset register. sigh.
- *
- * Because of the lacking offset register we must map the whole card.
- * We assign each memory window PCMCIA_MEM_WIN_SIZE address space.
- * Make sure there is (PCMCIA_MEM_WIN_SIZE * PCMCIA_MEM_WIN_NO
- * * PCMCIA_SOCKETS_NO) bytes at PCMCIA_MEM_WIN_BASE.
- * The i/o windows are dynamically allocated at PCMCIA_IO_WIN_BASE.
- * They are maximum 64KByte each...
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/string.h>
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/timer.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/fsl_devices.h>
-#include <linux/bitops.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-
-#include <asm/io.h>
-#include <asm/time.h>
-#include <asm/mpc8xx.h>
-#include <asm/8xx_immap.h>
-#include <asm/irq.h>
-#include <asm/fs_pd.h>
-
-#include <pcmcia/ss.h>
-
-#define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args)
-#define pcmcia_error(args...) printk(KERN_ERR "m8xx_pcmcia: "args)
-
-static const char *version = "Version 0.06, Aug 2005";
-MODULE_LICENSE("Dual MPL/GPL");
-
-#if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B)
-
-/* The ADS board use SLOT_A */
-#ifdef CONFIG_ADS
-#define CONFIG_PCMCIA_SLOT_A
-#define CONFIG_BD_IS_MHZ
-#endif
-
-/* The FADS series are a mess */
-#ifdef CONFIG_FADS
-#if defined(CONFIG_MPC860T) || defined(CONFIG_MPC860) || defined(CONFIG_MPC821)
-#define CONFIG_PCMCIA_SLOT_A
-#else
-#define CONFIG_PCMCIA_SLOT_B
-#endif
-#endif
-
-#if defined(CONFIG_MPC885ADS)
-#define CONFIG_PCMCIA_SLOT_A
-#define PCMCIA_GLITCHY_CD
-#endif
-
-/* Cyclades ACS uses both slots */
-#ifdef CONFIG_PRxK
-#define CONFIG_PCMCIA_SLOT_A
-#define CONFIG_PCMCIA_SLOT_B
-#endif
-
-#endif /* !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) */
-
-#if defined(CONFIG_PCMCIA_SLOT_A) && defined(CONFIG_PCMCIA_SLOT_B)
-
-#define PCMCIA_SOCKETS_NO 2
-/* We have only 8 windows, dualsocket support will be limited. */
-#define PCMCIA_MEM_WIN_NO 2
-#define PCMCIA_IO_WIN_NO 2
-#define PCMCIA_SLOT_MSG "SLOT_A and SLOT_B"
-
-#elif defined(CONFIG_PCMCIA_SLOT_A) || defined(CONFIG_PCMCIA_SLOT_B)
-
-#define PCMCIA_SOCKETS_NO 1
-/* full support for one slot */
-#define PCMCIA_MEM_WIN_NO 5
-#define PCMCIA_IO_WIN_NO 2
-
-/* define _slot_ to be able to optimize macros */
-
-#ifdef CONFIG_PCMCIA_SLOT_A
-#define _slot_ 0
-#define PCMCIA_SLOT_MSG "SLOT_A"
-#else
-#define _slot_ 1
-#define PCMCIA_SLOT_MSG "SLOT_B"
-#endif
-
-#else
-#error m8xx_pcmcia: Bad configuration!
-#endif
-
-/* ------------------------------------------------------------------------- */
-
-#define PCMCIA_MEM_WIN_BASE 0xe0000000 /* base address for memory window 0 */
-#define PCMCIA_MEM_WIN_SIZE 0x04000000 /* each memory window is 64 MByte */
-#define PCMCIA_IO_WIN_BASE _IO_BASE /* base address for io window 0 */
-/* ------------------------------------------------------------------------- */
-
-static int pcmcia_schlvl;
-
-static DEFINE_SPINLOCK(events_lock);
-
-#define PCMCIA_SOCKET_KEY_5V 1
-#define PCMCIA_SOCKET_KEY_LV 2
-
-/* look up table for pgcrx registers */
-static u32 *m8xx_pgcrx[2];
-
-/*
- * This structure is used to address each window in the PCMCIA controller.
- *
- * Keep in mind that we assume that pcmcia_win[n+1] is mapped directly
- * after pcmcia_win[n]...
- */
-
-struct pcmcia_win {
- u32 br;
- u32 or;
-};
-
-/*
- * For some reason the hardware guys decided to make both slots share
- * some registers.
- *
- * Could someone invent object oriented hardware ?
- *
- * The macros are used to get the right bit from the registers.
- * SLOT_A : slot = 0
- * SLOT_B : slot = 1
- */
-
-#define M8XX_PCMCIA_VS1(slot) (0x80000000 >> (slot << 4))
-#define M8XX_PCMCIA_VS2(slot) (0x40000000 >> (slot << 4))
-#define M8XX_PCMCIA_VS_MASK(slot) (0xc0000000 >> (slot << 4))
-#define M8XX_PCMCIA_VS_SHIFT(slot) (30 - (slot << 4))
-
-#define M8XX_PCMCIA_WP(slot) (0x20000000 >> (slot << 4))
-#define M8XX_PCMCIA_CD2(slot) (0x10000000 >> (slot << 4))
-#define M8XX_PCMCIA_CD1(slot) (0x08000000 >> (slot << 4))
-#define M8XX_PCMCIA_BVD2(slot) (0x04000000 >> (slot << 4))
-#define M8XX_PCMCIA_BVD1(slot) (0x02000000 >> (slot << 4))
-#define M8XX_PCMCIA_RDY(slot) (0x01000000 >> (slot << 4))
-#define M8XX_PCMCIA_RDY_L(slot) (0x00800000 >> (slot << 4))
-#define M8XX_PCMCIA_RDY_H(slot) (0x00400000 >> (slot << 4))
-#define M8XX_PCMCIA_RDY_R(slot) (0x00200000 >> (slot << 4))
-#define M8XX_PCMCIA_RDY_F(slot) (0x00100000 >> (slot << 4))
-#define M8XX_PCMCIA_MASK(slot) (0xFFFF0000 >> (slot << 4))
-
-#define M8XX_PCMCIA_POR_VALID 0x00000001
-#define M8XX_PCMCIA_POR_WRPROT 0x00000002
-#define M8XX_PCMCIA_POR_ATTRMEM 0x00000010
-#define M8XX_PCMCIA_POR_IO 0x00000018
-#define M8XX_PCMCIA_POR_16BIT 0x00000040
-
-#define M8XX_PGCRX(slot) m8xx_pgcrx[slot]
-
-#define M8XX_PGCRX_CXOE 0x00000080
-#define M8XX_PGCRX_CXRESET 0x00000040
-
-/* we keep one lookup table per socket to check flags */
-
-#define PCMCIA_EVENTS_MAX 5 /* 4 max at a time + termination */
-
-struct event_table {
- u32 regbit;
- u32 eventbit;
-};
-
-static const char driver_name[] = "m8xx-pcmcia";
-
-struct socket_info {
- void (*handler) (void *info, u32 events);
- void *info;
-
- u32 slot;
- pcmconf8xx_t *pcmcia;
- u32 bus_freq;
- int hwirq;
-
- socket_state_t state;
- struct pccard_mem_map mem_win[PCMCIA_MEM_WIN_NO];
- struct pccard_io_map io_win[PCMCIA_IO_WIN_NO];
- struct event_table events[PCMCIA_EVENTS_MAX];
- struct pcmcia_socket socket;
-};
-
-static struct socket_info socket[PCMCIA_SOCKETS_NO];
-
-/*
- * Search this table to see if the windowsize is
- * supported...
- */
-
-#define M8XX_SIZES_NO 32
-
-static const u32 m8xx_size_to_gray[M8XX_SIZES_NO] = {
- 0x00000001, 0x00000002, 0x00000008, 0x00000004,
- 0x00000080, 0x00000040, 0x00000010, 0x00000020,
- 0x00008000, 0x00004000, 0x00001000, 0x00002000,
- 0x00000100, 0x00000200, 0x00000800, 0x00000400,
-
- 0x0fffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0x01000000, 0x02000000, 0xffffffff, 0x04000000,
- 0x00010000, 0x00020000, 0x00080000, 0x00040000,
- 0x00800000, 0x00400000, 0x00100000, 0x00200000
-};
-
-/* ------------------------------------------------------------------------- */
-
-static irqreturn_t m8xx_interrupt(int irq, void *dev);
-
-#define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */
-
-/* FADS Boards from Motorola */
-
-#if defined(CONFIG_FADS)
-
-#define PCMCIA_BOARD_MSG "FADS"
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
- u32 reg = 0;
-
- switch (vcc) {
- case 0:
- break;
- case 33:
- reg |= BCSR1_PCCVCC0;
- break;
- case 50:
- reg |= BCSR1_PCCVCC1;
- break;
- default:
- return 1;
- }
-
- switch (vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if (vcc == vpp)
- reg |= BCSR1_PCCVPP1;
- else
- return 1;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= BCSR1_PCCVPP0;
- else
- return 1;
- default:
- return 1;
- }
-
- /* first, turn off all power */
- out_be32((u32 *) BCSR1,
- in_be32((u32 *) BCSR1) & ~(BCSR1_PCCVCC_MASK |
- BCSR1_PCCVPP_MASK));
-
- /* enable new powersettings */
- out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | reg);
-
- return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-
-static void hardware_enable(int slot)
-{
- out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) & ~BCSR1_PCCEN);
-}
-
-static void hardware_disable(int slot)
-{
- out_be32((u32 *) BCSR1, in_be32((u32 *) BCSR1) | BCSR1_PCCEN);
-}
-
-#endif
-
-/* MPC885ADS Boards */
-
-#if defined(CONFIG_MPC885ADS)
-
-#define PCMCIA_BOARD_MSG "MPC885ADS"
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V
-
-static inline void hardware_enable(int slot)
-{
- m8xx_pcmcia_ops.hw_ctrl(slot, 1);
-}
-
-static inline void hardware_disable(int slot)
-{
- m8xx_pcmcia_ops.hw_ctrl(slot, 0);
-}
-
-static inline int voltage_set(int slot, int vcc, int vpp)
-{
- return m8xx_pcmcia_ops.voltage_set(slot, vcc, vpp);
-}
-
-#endif
-
-#if defined(CONFIG_PRxK)
-#include <asm/cpld.h>
-extern volatile fpga_pc_regs *fpga_pc;
-
-#define PCMCIA_BOARD_MSG "MPC855T"
-
-static int voltage_set(int slot, int vcc, int vpp)
-{
- u8 reg = 0;
- u8 regread;
- cpld_regs *ccpld = get_cpld();
-
- switch (vcc) {
- case 0:
- break;
- case 33:
- reg |= PCMCIA_VCC_33;
- break;
- case 50:
- reg |= PCMCIA_VCC_50;
- break;
- default:
- return 1;
- }
-
- switch (vpp) {
- case 0:
- break;
- case 33:
- case 50:
- if (vcc == vpp)
- reg |= PCMCIA_VPP_VCC;
- else
- return 1;
- break;
- case 120:
- if ((vcc == 33) || (vcc == 50))
- reg |= PCMCIA_VPP_12;
- else
- return 1;
- default:
- return 1;
- }
-
- reg = reg >> (slot << 2);
- regread = in_8(&ccpld->fpga_pc_ctl);
- if (reg !=
- (regread & ((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >> (slot << 2)))) {
- /* enable new powersettings */
- regread =
- regread & ~((PCMCIA_VCC_MASK | PCMCIA_VPP_MASK) >>
- (slot << 2));
- out_8(&ccpld->fpga_pc_ctl, reg | regread);
- msleep(100);
- }
-
- return 0;
-}
-
-#define socket_get(_slot_) PCMCIA_SOCKET_KEY_LV
-#define hardware_enable(_slot_) /* No hardware to enable */
-#define hardware_disable(_slot_) /* No hardware to disable */
-
-#endif /* CONFIG_PRxK */
-
-static u32 pending_events[PCMCIA_SOCKETS_NO];
-static DEFINE_SPINLOCK(pending_event_lock);
-
-static irqreturn_t m8xx_interrupt(int irq, void *dev)
-{
- struct socket_info *s;
- struct event_table *e;
- unsigned int i, events, pscr, pipr, per;
- pcmconf8xx_t *pcmcia = socket[0].pcmcia;
-
- pr_debug("m8xx_pcmcia: Interrupt!\n");
- /* get interrupt sources */
-
- pscr = in_be32(&pcmcia->pcmc_pscr);
- pipr = in_be32(&pcmcia->pcmc_pipr);
- per = in_be32(&pcmcia->pcmc_per);
-
- for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
- s = &socket[i];
- e = &s->events[0];
- events = 0;
-
- while (e->regbit) {
- if (pscr & e->regbit)
- events |= e->eventbit;
-
- e++;
- }
-
- /*
- * report only if both card detect signals are the same
- * not too nice done,
- * we depend on that CD2 is the bit to the left of CD1...
- */
- if (events & SS_DETECT)
- if (((pipr & M8XX_PCMCIA_CD2(i)) >> 1) ^
- (pipr & M8XX_PCMCIA_CD1(i))) {
- events &= ~SS_DETECT;
- }
-#ifdef PCMCIA_GLITCHY_CD
- /*
- * I've experienced CD problems with my ADS board.
- * We make an extra check to see if there was a
- * real change of Card detection.
- */
-
- if ((events & SS_DETECT) &&
- ((pipr &
- (M8XX_PCMCIA_CD2(i) | M8XX_PCMCIA_CD1(i))) == 0) &&
- (s->state.Vcc | s->state.Vpp)) {
- events &= ~SS_DETECT;
- /*printk( "CD glitch workaround - CD = 0x%08x!\n",
- (pipr & (M8XX_PCMCIA_CD2(i)
- | M8XX_PCMCIA_CD1(i)))); */
- }
-#endif
-
- /* call the handler */
-
- pr_debug("m8xx_pcmcia: slot %u: events = 0x%02x, pscr = 0x%08x, "
- "pipr = 0x%08x\n", i, events, pscr, pipr);
-
- if (events) {
- spin_lock(&pending_event_lock);
- pending_events[i] |= events;
- spin_unlock(&pending_event_lock);
- /*
- * Turn off RDY_L bits in the PER mask on
- * CD interrupt receival.
- *
- * They can generate bad interrupts on the
- * ACS4,8,16,32. - marcelo
- */
- per &= ~M8XX_PCMCIA_RDY_L(0);
- per &= ~M8XX_PCMCIA_RDY_L(1);
-
- out_be32(&pcmcia->pcmc_per, per);
-
- if (events)
- pcmcia_parse_events(&socket[i].socket, events);
- }
- }
-
- /* clear the interrupt sources */
- out_be32(&pcmcia->pcmc_pscr, pscr);
-
- pr_debug("m8xx_pcmcia: Interrupt done.\n");
-
- return IRQ_HANDLED;
-}
-
-static u32 m8xx_get_graycode(u32 size)
-{
- u32 k;
-
- for (k = 0; k < M8XX_SIZES_NO; k++)
- if (m8xx_size_to_gray[k] == size)
- break;
-
- if ((k == M8XX_SIZES_NO) || (m8xx_size_to_gray[k] == -1))
- k = -1;
-
- return k;
-}
-
-static u32 m8xx_get_speed(u32 ns, u32 is_io, u32 bus_freq)
-{
- u32 reg, clocks, psst, psl, psht;
-
- if (!ns) {
-
- /*
- * We get called with IO maps setup to 0ns
- * if not specified by the user.
- * They should be 255ns.
- */
-
- if (is_io)
- ns = 255;
- else
- ns = 100; /* fast memory if 0 */
- }
-
- /*
- * In PSST, PSL, PSHT fields we tell the controller
- * timing parameters in CLKOUT clock cycles.
- * CLKOUT is the same as GCLK2_50.
- */
-
-/* how we want to adjust the timing - in percent */
-
-#define ADJ 180 /* 80 % longer accesstime - to be sure */
-
- clocks = ((bus_freq / 1000) * ns) / 1000;
- clocks = (clocks * ADJ) / (100 * 1000);
- if (clocks >= PCMCIA_BMT_LIMIT) {
- printk("Max access time limit reached\n");
- clocks = PCMCIA_BMT_LIMIT - 1;
- }
-
- psst = clocks / 7; /* setup time */
- psht = clocks / 7; /* hold time */
- psl = (clocks * 5) / 7; /* strobe length */
-
- psst += clocks - (psst + psht + psl);
-
- reg = psst << 12;
- reg |= psl << 7;
- reg |= psht << 16;
-
- return reg;
-}
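For reference, the timing helper in this removed driver works in thousandths of a clock to keep integer precision: the first expression yields cycles x 1000, the ADJ step applies the 80% safety margin and drops back to whole cycles, and the result is split roughly 1:5:1 between setup, strobe and hold. A restatement of the arithmetic for an assumed 48 MHz bus clock and the 255 ns I/O default (the numbers are an example, not from the patch):

    /*
     * Illustrative only: the removed m8xx_get_speed() arithmetic,
     * evaluated for bus_freq = 48 MHz and ns = 255.
     */
    u32 bus_freq = 48000000, ns = 255;
    u32 clocks, psst, psht, psl;

    clocks = ((bus_freq / 1000) * ns) / 1000;   /* 12240 = cycles * 1000 */
    clocks = (clocks * 180) / (100 * 1000);     /* 22 cycles after +80% margin */

    psst = clocks / 7;                          /* 3: setup  */
    psht = clocks / 7;                          /* 3: hold   */
    psl  = (clocks * 5) / 7;                    /* 15: strobe */

    psst += clocks - (psst + psht + psl);       /* absorb remainder -> psst = 4 */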
-
-static int m8xx_get_status(struct pcmcia_socket *sock, unsigned int *value)
-{
- int lsock = container_of(sock, struct socket_info, socket)->slot;
- struct socket_info *s = &socket[lsock];
- unsigned int pipr, reg;
- pcmconf8xx_t *pcmcia = s->pcmcia;
-
- pipr = in_be32(&pcmcia->pcmc_pipr);
-
- *value = ((pipr & (M8XX_PCMCIA_CD1(lsock)
- | M8XX_PCMCIA_CD2(lsock))) == 0) ? SS_DETECT : 0;
- *value |= (pipr & M8XX_PCMCIA_WP(lsock)) ? SS_WRPROT : 0;
-
- if (s->state.flags & SS_IOCARD)
- *value |= (pipr & M8XX_PCMCIA_BVD1(lsock)) ? SS_STSCHG : 0;
- else {
- *value |= (pipr & M8XX_PCMCIA_RDY(lsock)) ? SS_READY : 0;
- *value |= (pipr & M8XX_PCMCIA_BVD1(lsock)) ? SS_BATDEAD : 0;
- *value |= (pipr & M8XX_PCMCIA_BVD2(lsock)) ? SS_BATWARN : 0;
- }
-
- if (s->state.Vcc | s->state.Vpp)
- *value |= SS_POWERON;
-
- /*
- * Voltage detection:
- * This driver only supports 16-Bit pc-cards.
- * Cardbus is not handled here.
- *
- * To determine what voltage to use we must read the VS1 and VS2 pin.
- * Depending on what socket type is present,
- * different combinations mean different things.
- *
- * Card Key Socket Key VS1 VS2 Card Vcc for CIS parse
- *
- * 5V 5V, LV* NC NC 5V only 5V (if available)
- *
- * 5V 5V, LV* GND NC 5 or 3.3V as low as possible
- *
- * 5V 5V, LV* GND GND 5, 3.3, x.xV as low as possible
- *
- * LV* 5V - - shall not fit into socket
- *
- * LV* LV* GND NC 3.3V only 3.3V
- *
- * LV* LV* NC GND x.xV x.xV (if avail.)
- *
- * LV* LV* GND GND 3.3 or x.xV as low as possible
- *
- * *LV means Low Voltage
- *
- *
- * That gives us the following table:
- *
- * Socket VS1 VS2 Voltage
- *
- * 5V NC NC 5V
- * 5V NC GND none (should not be possible)
- * 5V GND NC >= 3.3V
- * 5V GND GND >= x.xV
- *
- * LV NC NC 5V (if available)
- * LV NC GND x.xV (if available)
- * LV GND NC 3.3V
- * LV GND GND >= x.xV
- *
- * So, how do I determine if I have a 5V or a LV
- * socket on my board? Look at the socket!
- *
- *
- * Socket with 5V key:
- * ++--------------------------------------------+
- * || |
- * || ||
- * || ||
- * | |
- * +---------------------------------------------+
- *
- * Socket with LV key:
- * ++--------------------------------------------+
- * || |
- * | ||
- * | ||
- * | |
- * +---------------------------------------------+
- *
- *
- * With other words - LV only cards does not fit
- * into the 5V socket!
- */
-
- /* read out VS1 and VS2 */
-
- reg = (pipr & M8XX_PCMCIA_VS_MASK(lsock))
- >> M8XX_PCMCIA_VS_SHIFT(lsock);
-
- if (socket_get(lsock) == PCMCIA_SOCKET_KEY_LV) {
- switch (reg) {
- case 1:
- *value |= SS_3VCARD;
- break; /* GND, NC - 3.3V only */
- case 2:
- *value |= SS_XVCARD;
- break; /* NC. GND - x.xV only */
- };
- }
-
- pr_debug("m8xx_pcmcia: GetStatus(%d) = %#2.2x\n", lsock, *value);
- return 0;
-}
-
-static int m8xx_set_socket(struct pcmcia_socket *sock, socket_state_t * state)
-{
- int lsock = container_of(sock, struct socket_info, socket)->slot;
- struct socket_info *s = &socket[lsock];
- struct event_table *e;
- unsigned int reg;
- unsigned long flags;
- pcmconf8xx_t *pcmcia = socket[0].pcmcia;
-
- pr_debug("m8xx_pcmcia: SetSocket(%d, flags %#3.3x, Vcc %d, Vpp %d, "
- "io_irq %d, csc_mask %#2.2x)\n", lsock, state->flags,
- state->Vcc, state->Vpp, state->io_irq, state->csc_mask);
-
- /* First, set voltage - bail out if invalid */
- if (voltage_set(lsock, state->Vcc, state->Vpp))
- return -EINVAL;
-
- /* Take care of reset... */
- if (state->flags & SS_RESET)
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXRESET); /* active high */
- else
- out_be32(M8XX_PGCRX(lsock),
- in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXRESET);
-
- /* ... and output enable. */
-
- /* The CxOE signal is connected to a 74541 on the ADS.
- I guess most other boards used the ADS as a reference.
- I tried to control the CxOE signal with SS_OUTPUT_ENA,
- but the reset signal seems connected via the 541.
- If the CxOE is left high are some signals tristated and
- no pullups are present -> the cards act weird.
- So right now the buffers are enabled if the power is on. */
-
- if (state->Vcc || state->Vpp)
- out_be32(M8XX_PGCRX(lsock), in_be32(M8XX_PGCRX(lsock)) & ~M8XX_PGCRX_CXOE); /* active low */
- else
- out_be32(M8XX_PGCRX(lsock),
- in_be32(M8XX_PGCRX(lsock)) | M8XX_PGCRX_CXOE);
-
- /*
- * We'd better turn off interrupts before
- * we mess with the events-table..
- */
-
- spin_lock_irqsave(&events_lock, flags);
-
- /*
- * Play around with the interrupt mask to be able to
- * give the events the generic pcmcia driver wants us to.
- */
-
- e = &s->events[0];
- reg = 0;
-
- if (state->csc_mask & SS_DETECT) {
- e->eventbit = SS_DETECT;
- reg |= e->regbit = (M8XX_PCMCIA_CD2(lsock)
- | M8XX_PCMCIA_CD1(lsock));
- e++;
- }
- if (state->flags & SS_IOCARD) {
- /*
- * I/O card
- */
- if (state->csc_mask & SS_STSCHG) {
- e->eventbit = SS_STSCHG;
- reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
- e++;
- }
- /*
- * If io_irq is non-zero we should enable irq.
- */
- if (state->io_irq) {
- out_be32(M8XX_PGCRX(lsock),
- in_be32(M8XX_PGCRX(lsock)) |
- mk_int_int_mask(s->hwirq) << 24);
- /*
- * Strange thing here:
- * The manual does not tell us which interrupt
- * the sources generate.
- * Anyhow, I found out that RDY_L generates IREQLVL.
- *
- * We use level triggerd interrupts, and they don't
- * have to be cleared in PSCR in the interrupt handler.
- */
- reg |= M8XX_PCMCIA_RDY_L(lsock);
- } else
- out_be32(M8XX_PGCRX(lsock),
- in_be32(M8XX_PGCRX(lsock)) & 0x00ffffff);
- } else {
- /*
- * Memory card
- */
- if (state->csc_mask & SS_BATDEAD) {
- e->eventbit = SS_BATDEAD;
- reg |= e->regbit = M8XX_PCMCIA_BVD1(lsock);
- e++;
- }
- if (state->csc_mask & SS_BATWARN) {
- e->eventbit = SS_BATWARN;
- reg |= e->regbit = M8XX_PCMCIA_BVD2(lsock);
- e++;
- }
- /* What should I trigger on - low/high,raise,fall? */
- if (state->csc_mask & SS_READY) {
- e->eventbit = SS_READY;
- reg |= e->regbit = 0; //??
- e++;
- }
- }
-
- e->regbit = 0; /* terminate list */
-
- /*
- * Clear the status changed .
- * Port A and Port B share the same port.
- * Writing ones will clear the bits.
- */
-
- out_be32(&pcmcia->pcmc_pscr, reg);
-
- /*
- * Write the mask.
- * Port A and Port B share the same port.
- * Need for read-modify-write.
- * Ones will enable the interrupt.
- */
-
- reg |=
- in_be32(&pcmcia->
- pcmc_per) & (M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
- out_be32(&pcmcia->pcmc_per, reg);
-
- spin_unlock_irqrestore(&events_lock, flags);
-
- /* copy the struct and modify the copy */
-
- s->state = *state;
-
- return 0;
-}
-
-static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io)
-{
- int lsock = container_of(sock, struct socket_info, socket)->slot;
-
- struct socket_info *s = &socket[lsock];
- struct pcmcia_win *w;
- unsigned int reg, winnr;
- pcmconf8xx_t *pcmcia = s->pcmcia;
-
-#define M8XX_SIZE (io->stop - io->start + 1)
-#define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start)
-
- pr_debug("m8xx_pcmcia: SetIOMap(%d, %d, %#2.2x, %d ns, "
- "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags,
- io->speed, (unsigned long long)io->start,
- (unsigned long long)io->stop);
-
- if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff)
- || (io->stop > 0xffff) || (io->stop < io->start))
- return -EINVAL;
-
- if ((reg = m8xx_get_graycode(M8XX_SIZE)) == -1)
- return -EINVAL;
-
- if (io->flags & MAP_ACTIVE) {
-
- pr_debug("m8xx_pcmcia: io->flags & MAP_ACTIVE\n");
-
- winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
- + (lsock * PCMCIA_IO_WIN_NO) + io->map;
-
- /* setup registers */
-
- w = (void *)&pcmcia->pcmc_pbr0;
- w += winnr;
-
- out_be32(&w->or, 0); /* turn off window first */
- out_be32(&w->br, M8XX_BASE);
-
- reg <<= 27;
- reg |= M8XX_PCMCIA_POR_IO | (lsock << 2);
-
- reg |= m8xx_get_speed(io->speed, 1, s->bus_freq);
-
- if (io->flags & MAP_WRPROT)
- reg |= M8XX_PCMCIA_POR_WRPROT;
-
- /*if(io->flags & (MAP_16BIT | MAP_AUTOSZ)) */
- if (io->flags & MAP_16BIT)
- reg |= M8XX_PCMCIA_POR_16BIT;
-
- if (io->flags & MAP_ACTIVE)
- reg |= M8XX_PCMCIA_POR_VALID;
-
- out_be32(&w->or, reg);
-
- pr_debug("m8xx_pcmcia: Socket %u: Mapped io window %u at "
- "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
- } else {
- /* shutdown IO window */
- winnr = (PCMCIA_MEM_WIN_NO * PCMCIA_SOCKETS_NO)
- + (lsock * PCMCIA_IO_WIN_NO) + io->map;
-
- /* setup registers */
-
- w = (void *)&pcmcia->pcmc_pbr0;
- w += winnr;
-
- out_be32(&w->or, 0); /* turn off window */
- out_be32(&w->br, 0); /* turn off base address */
-
- pr_debug("m8xx_pcmcia: Socket %u: Unmapped io window %u at "
- "%#8.8x, OR = %#8.8x.\n", lsock, io->map, w->br, w->or);
- }
-
- /* copy the struct and modify the copy */
- s->io_win[io->map] = *io;
- s->io_win[io->map].flags &= (MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
- pr_debug("m8xx_pcmcia: SetIOMap exit\n");
-
- return 0;
-}
-
-static int m8xx_set_mem_map(struct pcmcia_socket *sock,
- struct pccard_mem_map *mem)
-{
- int lsock = container_of(sock, struct socket_info, socket)->slot;
- struct socket_info *s = &socket[lsock];
- struct pcmcia_win *w;
- struct pccard_mem_map *old;
- unsigned int reg, winnr;
- pcmconf8xx_t *pcmcia = s->pcmcia;
-
- pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, "
- "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags,
- mem->speed, (unsigned long long)mem->static_start,
- mem->card_start);
-
- if ((mem->map >= PCMCIA_MEM_WIN_NO)
-// || ((mem->s) >= PCMCIA_MEM_WIN_SIZE)
- || (mem->card_start >= 0x04000000)
- || (mem->static_start & 0xfff) /* 4KByte resolution */
- ||(mem->card_start & 0xfff))
- return -EINVAL;
-
- if ((reg = m8xx_get_graycode(PCMCIA_MEM_WIN_SIZE)) == -1) {
- printk("Cannot set size to 0x%08x.\n", PCMCIA_MEM_WIN_SIZE);
- return -EINVAL;
- }
- reg <<= 27;
-
- winnr = (lsock * PCMCIA_MEM_WIN_NO) + mem->map;
-
- /* Setup the window in the pcmcia controller */
-
- w = (void *)&pcmcia->pcmc_pbr0;
- w += winnr;
-
- reg |= lsock << 2;
-
- reg |= m8xx_get_speed(mem->speed, 0, s->bus_freq);
-
- if (mem->flags & MAP_ATTRIB)
- reg |= M8XX_PCMCIA_POR_ATTRMEM;
-
- if (mem->flags & MAP_WRPROT)
- reg |= M8XX_PCMCIA_POR_WRPROT;
-
- if (mem->flags & MAP_16BIT)
- reg |= M8XX_PCMCIA_POR_16BIT;
-
- if (mem->flags & MAP_ACTIVE)
- reg |= M8XX_PCMCIA_POR_VALID;
-
- out_be32(&w->or, reg);
-
- pr_debug("m8xx_pcmcia: Socket %u: Mapped memory window %u at %#8.8x, "
- "OR = %#8.8x.\n", lsock, mem->map, w->br, w->or);
-
- if (mem->flags & MAP_ACTIVE) {
- /* get the new base address */
- mem->static_start = PCMCIA_MEM_WIN_BASE +
- (PCMCIA_MEM_WIN_SIZE * winnr)
- + mem->card_start;
- }
-
- pr_debug("m8xx_pcmcia: SetMemMap(%d, %d, %#2.2x, %d ns, "
- "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags,
- mem->speed, (unsigned long long)mem->static_start,
- mem->card_start);
-
- /* copy the struct and modify the copy */
-
- old = &s->mem_win[mem->map];
-
- *old = *mem;
- old->flags &= (MAP_ATTRIB | MAP_WRPROT | MAP_16BIT | MAP_ACTIVE);
-
- return 0;
-}
-
-static int m8xx_sock_init(struct pcmcia_socket *sock)
-{
- int i;
- pccard_io_map io = { 0, 0, 0, 0, 1 };
- pccard_mem_map mem = { 0, 0, 0, 0, 0, 0 };
-
- pr_debug("m8xx_pcmcia: sock_init(%d)\n", s);
-
- m8xx_set_socket(sock, &dead_socket);
- for (i = 0; i < PCMCIA_IO_WIN_NO; i++) {
- io.map = i;
- m8xx_set_io_map(sock, &io);
- }
- for (i = 0; i < PCMCIA_MEM_WIN_NO; i++) {
- mem.map = i;
- m8xx_set_mem_map(sock, &mem);
- }
-
- return 0;
-
-}
-
-static int m8xx_sock_suspend(struct pcmcia_socket *sock)
-{
- return m8xx_set_socket(sock, &dead_socket);
-}
-
-static struct pccard_operations m8xx_services = {
- .init = m8xx_sock_init,
- .suspend = m8xx_sock_suspend,
- .get_status = m8xx_get_status,
- .set_socket = m8xx_set_socket,
- .set_io_map = m8xx_set_io_map,
- .set_mem_map = m8xx_set_mem_map,
-};
-
-static int __init m8xx_probe(struct platform_device *ofdev)
-{
- struct pcmcia_win *w;
- unsigned int i, m, hwirq;
- pcmconf8xx_t *pcmcia;
- int status;
- struct device_node *np = ofdev->dev.of_node;
-
- pcmcia_info("%s\n", version);
-
- pcmcia = of_iomap(np, 0);
- if (pcmcia == NULL)
- return -EINVAL;
-
- pcmcia_schlvl = irq_of_parse_and_map(np, 0);
- hwirq = irq_map[pcmcia_schlvl].hwirq;
- if (pcmcia_schlvl < 0) {
- iounmap(pcmcia);
- return -EINVAL;
- }
-
- m8xx_pgcrx[0] = &pcmcia->pcmc_pgcra;
- m8xx_pgcrx[1] = &pcmcia->pcmc_pgcrb;
-
- pcmcia_info(PCMCIA_BOARD_MSG " using " PCMCIA_SLOT_MSG
- " with IRQ %u (%d). \n", pcmcia_schlvl, hwirq);
-
- /* Configure Status change interrupt */
-
- if (request_irq(pcmcia_schlvl, m8xx_interrupt, IRQF_SHARED,
- driver_name, socket)) {
- pcmcia_error("Cannot allocate IRQ %u for SCHLVL!\n",
- pcmcia_schlvl);
- iounmap(pcmcia);
- return -1;
- }
-
- w = (void *)&pcmcia->pcmc_pbr0;
-
- out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
- clrbits32(&pcmcia->pcmc_per, M8XX_PCMCIA_MASK(0) | M8XX_PCMCIA_MASK(1));
-
- /* connect interrupt and disable CxOE */
-
- out_be32(M8XX_PGCRX(0),
- M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
- out_be32(M8XX_PGCRX(1),
- M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
-
- /* initialize the fixed memory windows */
-
- for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
- for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
- out_be32(&w->br, PCMCIA_MEM_WIN_BASE +
- (PCMCIA_MEM_WIN_SIZE
- * (m + i * PCMCIA_MEM_WIN_NO)));
-
- out_be32(&w->or, 0); /* set to not valid */
-
- w++;
- }
- }
-
- /* turn off voltage */
- voltage_set(0, 0, 0);
- voltage_set(1, 0, 0);
-
- /* Enable external hardware */
- hardware_enable(0);
- hardware_enable(1);
-
- for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
- socket[i].slot = i;
- socket[i].socket.owner = THIS_MODULE;
- socket[i].socket.features =
- SS_CAP_PCCARD | SS_CAP_MEM_ALIGN | SS_CAP_STATIC_MAP;
- socket[i].socket.irq_mask = 0x000;
- socket[i].socket.map_size = 0x1000;
- socket[i].socket.io_offset = 0;
- socket[i].socket.pci_irq = pcmcia_schlvl;
- socket[i].socket.ops = &m8xx_services;
- socket[i].socket.resource_ops = &pccard_iodyn_ops;
- socket[i].socket.cb_dev = NULL;
- socket[i].socket.dev.parent = &ofdev->dev;
- socket[i].pcmcia = pcmcia;
- socket[i].bus_freq = ppc_proc_freq;
- socket[i].hwirq = hwirq;
-
- }
-
- for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
- status = pcmcia_register_socket(&socket[i].socket);
- if (status < 0)
- pcmcia_error("Socket register failed\n");
- }
-
- return 0;
-}
-
-static int m8xx_remove(struct platform_device *ofdev)
-{
- u32 m, i;
- struct pcmcia_win *w;
- pcmconf8xx_t *pcmcia = socket[0].pcmcia;
-
- for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
- w = (void *)&pcmcia->pcmc_pbr0;
-
- out_be32(&pcmcia->pcmc_pscr, M8XX_PCMCIA_MASK(i));
- out_be32(&pcmcia->pcmc_per,
- in_be32(&pcmcia->pcmc_per) & ~M8XX_PCMCIA_MASK(i));
-
- /* turn off interrupt and disable CxOE */
- out_be32(M8XX_PGCRX(i), M8XX_PGCRX_CXOE);
-
- /* turn off memory windows */
- for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
- out_be32(&w->or, 0); /* set to not valid */
- w++;
- }
-
- /* turn off voltage */
- voltage_set(i, 0, 0);
-
- /* disable external hardware */
- hardware_disable(i);
- }
- for (i = 0; i < PCMCIA_SOCKETS_NO; i++)
- pcmcia_unregister_socket(&socket[i].socket);
- iounmap(pcmcia);
-
- free_irq(pcmcia_schlvl, NULL);
-
- return 0;
-}
-
-static const struct of_device_id m8xx_pcmcia_match[] = {
- {
- .type = "pcmcia",
- .compatible = "fsl,pq-pcmcia",
- },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, m8xx_pcmcia_match);
-
-static struct platform_driver m8xx_pcmcia_driver = {
- .driver = {
- .name = driver_name,
- .owner = THIS_MODULE,
- .of_match_table = m8xx_pcmcia_match,
- },
- .probe = m8xx_probe,
- .remove = m8xx_remove,
-};
-
-module_platform_driver(m8xx_pcmcia_driver);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index cc97c897945a..0dd742719154 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -159,6 +159,16 @@ config PHY_SAMSUNG_USB2
for particular PHYs will be enabled based on the SoC type in addition
to this driver.
+config PHY_S5PV210_USB2
+ bool "Support for S5PV210"
+ depends on PHY_SAMSUNG_USB2
+ depends on ARCH_S5PV210
+ help
+ Enable USB PHY support for S5PV210. This option requires that the
+ Samsung USB 2.0 PHY driver is enabled and means that support for this
+ particular SoC is compiled into the driver. On S5PV210, two PHYs are
+ available - device and host.
+
config PHY_EXYNOS4210_USB2
bool
depends on PHY_SAMSUNG_USB2
@@ -187,13 +197,6 @@ config PHY_EXYNOS5_USBDRD
This driver provides PHY interface for USB 3.0 DRD controller
present on Exynos5 SoC series.
-config PHY_XGENE
- tristate "APM X-Gene 15Gbps PHY support"
- depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST)
- select GENERIC_PHY
- help
- This option enables support for APM X-Gene SoC multi-purpose PHY.
-
config PHY_QCOM_APQ8064_SATA
tristate "Qualcomm APQ8064 SATA SerDes/PHY driver"
depends on ARCH_QCOM
@@ -208,4 +211,23 @@ config PHY_QCOM_IPQ806X_SATA
depends on OF
select GENERIC_PHY
+config PHY_ST_SPEAR1310_MIPHY
+ tristate "ST SPEAR1310-MIPHY driver"
+ select GENERIC_PHY
+ help
+ Support for ST SPEAr1310 MIPHY which can be used for PCIe and SATA.
+
+config PHY_ST_SPEAR1340_MIPHY
+ tristate "ST SPEAR1340-MIPHY driver"
+ select GENERIC_PHY
+ help
+ Support for ST SPEAr1340 MIPHY which can be used for PCIe and SATA.
+
+config PHY_XGENE
+ tristate "APM X-Gene 15Gbps PHY support"
+ depends on HAS_IOMEM && OF && (ARM64 || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ This option enables support for APM X-Gene SoC multi-purpose PHY.
+
endmenu
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 971ad0aac388..95c69ed5ed45 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -21,7 +21,10 @@ phy-exynos-usb2-y += phy-samsung-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4210_USB2) += phy-exynos4210-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS4X12_USB2) += phy-exynos4x12-usb2.o
phy-exynos-usb2-$(CONFIG_PHY_EXYNOS5250_USB2) += phy-exynos5250-usb2.o
+phy-exynos-usb2-$(CONFIG_PHY_S5PV210_USB2) += phy-s5pv210-usb2.o
obj-$(CONFIG_PHY_EXYNOS5_USBDRD) += phy-exynos5-usbdrd.o
-obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
+obj-$(CONFIG_PHY_ST_SPEAR1310_MIPHY) += phy-spear1310-miphy.o
+obj-$(CONFIG_PHY_ST_SPEAR1340_MIPHY) += phy-spear1340-miphy.o
+obj-$(CONFIG_PHY_XGENE) += phy-xgene.o
diff --git a/drivers/phy/phy-s5pv210-usb2.c b/drivers/phy/phy-s5pv210-usb2.c
new file mode 100644
index 000000000000..004d320767e4
--- /dev/null
+++ b/drivers/phy/phy-s5pv210-usb2.c
@@ -0,0 +1,187 @@
+/*
+ * Samsung SoC USB 1.1/2.0 PHY driver - S5PV210 support
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * Authors: Kamil Debski <k.debski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/phy/phy.h>
+#include "phy-samsung-usb2.h"
+
+/* Exynos USB PHY registers */
+
+/* PHY power control */
+#define S5PV210_UPHYPWR 0x0
+
+#define S5PV210_UPHYPWR_PHY0_SUSPEND BIT(0)
+#define S5PV210_UPHYPWR_PHY0_PWR BIT(3)
+#define S5PV210_UPHYPWR_PHY0_OTG_PWR BIT(4)
+#define S5PV210_UPHYPWR_PHY0 ( \
+ S5PV210_UPHYPWR_PHY0_SUSPEND | \
+ S5PV210_UPHYPWR_PHY0_PWR | \
+ S5PV210_UPHYPWR_PHY0_OTG_PWR)
+
+#define S5PV210_UPHYPWR_PHY1_SUSPEND BIT(6)
+#define S5PV210_UPHYPWR_PHY1_PWR BIT(7)
+#define S5PV210_UPHYPWR_PHY1 ( \
+ S5PV210_UPHYPWR_PHY1_SUSPEND | \
+ S5PV210_UPHYPWR_PHY1_PWR)
+
+/* PHY clock control */
+#define S5PV210_UPHYCLK 0x4
+
+#define S5PV210_UPHYCLK_PHYFSEL_MASK (0x3 << 0)
+#define S5PV210_UPHYCLK_PHYFSEL_48MHZ (0x0 << 0)
+#define S5PV210_UPHYCLK_PHYFSEL_24MHZ (0x3 << 0)
+#define S5PV210_UPHYCLK_PHYFSEL_12MHZ (0x2 << 0)
+
+#define S5PV210_UPHYCLK_PHY0_ID_PULLUP BIT(2)
+#define S5PV210_UPHYCLK_PHY0_COMMON_ON BIT(4)
+#define S5PV210_UPHYCLK_PHY1_COMMON_ON BIT(7)
+
+/* PHY reset control */
+#define S5PV210_UPHYRST 0x8
+
+#define S5PV210_URSTCON_PHY0 BIT(0)
+#define S5PV210_URSTCON_OTG_HLINK BIT(1)
+#define S5PV210_URSTCON_OTG_PHYLINK BIT(2)
+#define S5PV210_URSTCON_PHY1_ALL BIT(3)
+#define S5PV210_URSTCON_HOST_LINK_ALL BIT(4)
+
+/* Isolation, configured in the power management unit */
+#define S5PV210_USB_ISOL_OFFSET 0x680c
+#define S5PV210_USB_ISOL_DEVICE BIT(0)
+#define S5PV210_USB_ISOL_HOST BIT(1)
+
+
+enum s5pv210_phy_id {
+ S5PV210_DEVICE,
+ S5PV210_HOST,
+ S5PV210_NUM_PHYS,
+};
+
+/*
+ * s5pv210_rate_to_clk() converts the supplied clock rate to the value that
+ * can be written to the phy register.
+ */
+static int s5pv210_rate_to_clk(unsigned long rate, u32 *reg)
+{
+ switch (rate) {
+ case 12 * MHZ:
+ *reg = S5PV210_UPHYCLK_PHYFSEL_12MHZ;
+ break;
+ case 24 * MHZ:
+ *reg = S5PV210_UPHYCLK_PHYFSEL_24MHZ;
+ break;
+ case 48 * MHZ:
+ *reg = S5PV210_UPHYCLK_PHYFSEL_48MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
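A minimal usage sketch for s5pv210_rate_to_clk(), assuming a ref_clk clock handle obtained by the common Samsung USB2 PHY code (that glue is not shown in this hunk, so the handle name and error handling here are illustrative only):

	unsigned long rate = clk_get_rate(ref_clk);	/* e.g. 24000000 */
	u32 clk_reg;

	if (s5pv210_rate_to_clk(rate, &clk_reg))
		return -EINVAL;		/* unsupported reference clock rate */
	/* clk_reg now holds S5PV210_UPHYCLK_PHYFSEL_24MHZ and can be written
	 * to the UPHYCLK register by the power-on path (see s5pv210_phy_pwr). */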
+
+static void s5pv210_isol(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 mask;
+
+ switch (inst->cfg->id) {
+ case S5PV210_DEVICE:
+ mask = S5PV210_USB_ISOL_DEVICE;
+ break;
+ case S5PV210_HOST:
+ mask = S5PV210_USB_ISOL_HOST;
+ break;
+ default:
+ return;
+ }
+
+ regmap_update_bits(drv->reg_pmu, S5PV210_USB_ISOL_OFFSET,
+ mask, on ? 0 : mask);
+}
+
+static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
+{
+ struct samsung_usb2_phy_driver *drv = inst->drv;
+ u32 rstbits = 0;
+ u32 phypwr = 0;
+ u32 rst;
+ u32 pwr;
+
+ switch (inst->cfg->id) {
+ case S5PV210_DEVICE:
+ phypwr = S5PV210_UPHYPWR_PHY0;
+ rstbits = S5PV210_URSTCON_PHY0;
+ break;
+ case S5PV210_HOST:
+ phypwr = S5PV210_UPHYPWR_PHY1;
+ rstbits = S5PV210_URSTCON_PHY1_ALL |
+ S5PV210_URSTCON_HOST_LINK_ALL;
+ break;
+ }
+
+ if (on) {
+ writel(drv->ref_reg_val, drv->reg_phy + S5PV210_UPHYCLK);
+
+ pwr = readl(drv->reg_phy + S5PV210_UPHYPWR);
+ pwr &= ~phypwr;
+ writel(pwr, drv->reg_phy + S5PV210_UPHYPWR);
+
+ rst = readl(drv->reg_phy + S5PV210_UPHYRST);
+ rst |= rstbits;
+ writel(rst, drv->reg_phy + S5PV210_UPHYRST);
+ udelay(10);
+ rst &= ~rstbits;
+ writel(rst, drv->reg_phy + S5PV210_UPHYRST);
+ } else {
+ pwr = readl(drv->reg_phy + S5PV210_UPHYPWR);
+ pwr |= phypwr;
+ writel(pwr, drv->reg_phy + S5PV210_UPHYPWR);
+ }
+}
+
+static int s5pv210_power_on(struct samsung_usb2_phy_instance *inst)
+{
+ s5pv210_isol(inst, 0);
+ s5pv210_phy_pwr(inst, 1);
+
+ return 0;
+}
+
+static int s5pv210_power_off(struct samsung_usb2_phy_instance *inst)
+{
+ s5pv210_phy_pwr(inst, 0);
+ s5pv210_isol(inst, 1);
+
+ return 0;
+}
+
+static const struct samsung_usb2_common_phy s5pv210_phys[S5PV210_NUM_PHYS] = {
+ [S5PV210_DEVICE] = {
+ .label = "device",
+ .id = S5PV210_DEVICE,
+ .power_on = s5pv210_power_on,
+ .power_off = s5pv210_power_off,
+ },
+ [S5PV210_HOST] = {
+ .label = "host",
+ .id = S5PV210_HOST,
+ .power_on = s5pv210_power_on,
+ .power_off = s5pv210_power_off,
+ },
+};
+
+const struct samsung_usb2_phy_config s5pv210_usb2_phy_config = {
+ .num_phys = ARRAY_SIZE(s5pv210_phys),
+ .phys = s5pv210_phys,
+ .rate_to_clk = s5pv210_rate_to_clk,
+};
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
index ae30640a411d..3732ca25e09f 100644
--- a/drivers/phy/phy-samsung-usb2.c
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -111,6 +111,12 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = {
.data = &exynos5250_usb2_phy_config,
},
#endif
+#ifdef CONFIG_PHY_S5PV210_USB2
+ {
+ .compatible = "samsung,s5pv210-usb2-phy",
+ .data = &s5pv210_usb2_phy_config,
+ },
+#endif
{ },
};
MODULE_DEVICE_TABLE(of, samsung_usb2_phy_of_match);
diff --git a/drivers/phy/phy-samsung-usb2.h b/drivers/phy/phy-samsung-usb2.h
index b03da0ef39ac..44bead9b8f34 100644
--- a/drivers/phy/phy-samsung-usb2.h
+++ b/drivers/phy/phy-samsung-usb2.h
@@ -67,4 +67,5 @@ extern const struct samsung_usb2_phy_config exynos3250_usb2_phy_config;
extern const struct samsung_usb2_phy_config exynos4210_usb2_phy_config;
extern const struct samsung_usb2_phy_config exynos4x12_usb2_phy_config;
extern const struct samsung_usb2_phy_config exynos5250_usb2_phy_config;
+extern const struct samsung_usb2_phy_config s5pv210_usb2_phy_config;
#endif
diff --git a/drivers/phy/phy-spear1310-miphy.c b/drivers/phy/phy-spear1310-miphy.c
new file mode 100644
index 000000000000..6dcbfcddb372
--- /dev/null
+++ b/drivers/phy/phy-spear1310-miphy.c
@@ -0,0 +1,274 @@
+/*
+ * ST SPEAr1310-miphy driver
+ *
+ * Copyright (C) 2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@st.com>
+ * Mohit Kumar <mohit.kumar@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+
+/* SPEAr1310 Registers */
+#define SPEAR1310_PCIE_SATA_CFG 0x3A4
+ #define SPEAR1310_PCIE_SATA2_SEL_PCIE (0 << 31)
+ #define SPEAR1310_PCIE_SATA1_SEL_PCIE (0 << 30)
+ #define SPEAR1310_PCIE_SATA0_SEL_PCIE (0 << 29)
+ #define SPEAR1310_PCIE_SATA2_SEL_SATA BIT(31)
+ #define SPEAR1310_PCIE_SATA1_SEL_SATA BIT(30)
+ #define SPEAR1310_PCIE_SATA0_SEL_SATA BIT(29)
+ #define SPEAR1310_SATA2_CFG_TX_CLK_EN BIT(27)
+ #define SPEAR1310_SATA2_CFG_RX_CLK_EN BIT(26)
+ #define SPEAR1310_SATA2_CFG_POWERUP_RESET BIT(25)
+ #define SPEAR1310_SATA2_CFG_PM_CLK_EN BIT(24)
+ #define SPEAR1310_SATA1_CFG_TX_CLK_EN BIT(23)
+ #define SPEAR1310_SATA1_CFG_RX_CLK_EN BIT(22)
+ #define SPEAR1310_SATA1_CFG_POWERUP_RESET BIT(21)
+ #define SPEAR1310_SATA1_CFG_PM_CLK_EN BIT(20)
+ #define SPEAR1310_SATA0_CFG_TX_CLK_EN BIT(19)
+ #define SPEAR1310_SATA0_CFG_RX_CLK_EN BIT(18)
+ #define SPEAR1310_SATA0_CFG_POWERUP_RESET BIT(17)
+ #define SPEAR1310_SATA0_CFG_PM_CLK_EN BIT(16)
+ #define SPEAR1310_PCIE2_CFG_DEVICE_PRESENT BIT(11)
+ #define SPEAR1310_PCIE2_CFG_POWERUP_RESET BIT(10)
+ #define SPEAR1310_PCIE2_CFG_CORE_CLK_EN BIT(9)
+ #define SPEAR1310_PCIE2_CFG_AUX_CLK_EN BIT(8)
+ #define SPEAR1310_PCIE1_CFG_DEVICE_PRESENT BIT(7)
+ #define SPEAR1310_PCIE1_CFG_POWERUP_RESET BIT(6)
+ #define SPEAR1310_PCIE1_CFG_CORE_CLK_EN BIT(5)
+ #define SPEAR1310_PCIE1_CFG_AUX_CLK_EN BIT(4)
+ #define SPEAR1310_PCIE0_CFG_DEVICE_PRESENT BIT(3)
+ #define SPEAR1310_PCIE0_CFG_POWERUP_RESET BIT(2)
+ #define SPEAR1310_PCIE0_CFG_CORE_CLK_EN BIT(1)
+ #define SPEAR1310_PCIE0_CFG_AUX_CLK_EN BIT(0)
+
+ #define SPEAR1310_PCIE_CFG_MASK(x) ((0xF << (x * 4)) | BIT((x + 29)))
+ #define SPEAR1310_SATA_CFG_MASK(x) ((0xF << (x * 4 + 16)) | \
+ BIT((x + 29)))
+ #define SPEAR1310_PCIE_CFG_VAL(x) \
+ (SPEAR1310_PCIE_SATA##x##_SEL_PCIE | \
+ SPEAR1310_PCIE##x##_CFG_AUX_CLK_EN | \
+ SPEAR1310_PCIE##x##_CFG_CORE_CLK_EN | \
+ SPEAR1310_PCIE##x##_CFG_POWERUP_RESET | \
+ SPEAR1310_PCIE##x##_CFG_DEVICE_PRESENT)
+ #define SPEAR1310_SATA_CFG_VAL(x) \
+ (SPEAR1310_PCIE_SATA##x##_SEL_SATA | \
+ SPEAR1310_SATA##x##_CFG_PM_CLK_EN | \
+ SPEAR1310_SATA##x##_CFG_POWERUP_RESET | \
+ SPEAR1310_SATA##x##_CFG_RX_CLK_EN | \
+ SPEAR1310_SATA##x##_CFG_TX_CLK_EN)
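To make the token-pasting macros above easier to follow, the expansion for instance 0, computed from the BIT() definitions in this block, works out to:

	/* SPEAR1310_PCIE_CFG_VAL(0)
	 *   == SPEAR1310_PCIE_SATA0_SEL_PCIE | SPEAR1310_PCIE0_CFG_AUX_CLK_EN |
	 *      SPEAR1310_PCIE0_CFG_CORE_CLK_EN | SPEAR1310_PCIE0_CFG_POWERUP_RESET |
	 *      SPEAR1310_PCIE0_CFG_DEVICE_PRESENT
	 *   == (0 << 29) | BIT(0) | BIT(1) | BIT(2) | BIT(3) == 0x0000000f
	 *
	 * SPEAR1310_PCIE_CFG_MASK(0) == (0xF << 0) | BIT(29) == 0x2000000f
	 *
	 * so regmap_update_bits(misc, SPEAR1310_PCIE_SATA_CFG, MASK(0), VAL(0))
	 * touches only the four PCIe0 clock/reset bits plus the lane-0
	 * PCIe/SATA select bit, leaving the other lanes untouched.
	 */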
+
+#define SPEAR1310_PCIE_MIPHY_CFG_1 0x3A8
+ #define SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT BIT(31)
+ #define SPEAR1310_MIPHY_DUAL_CLK_REF_DIV2 BIT(28)
+ #define SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(x) (x << 16)
+ #define SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT BIT(15)
+ #define SPEAR1310_MIPHY_SINGLE_CLK_REF_DIV2 BIT(12)
+ #define SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(x) (x << 0)
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA_MASK (0xFFFF)
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK (0xFFFF << 16)
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA \
+ (SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT | \
+ SPEAR1310_MIPHY_DUAL_CLK_REF_DIV2 | \
+ SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(60) | \
+ SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT | \
+ SPEAR1310_MIPHY_SINGLE_CLK_REF_DIV2 | \
+ SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(60))
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
+ (SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(120))
+ #define SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE \
+ (SPEAR1310_MIPHY_DUAL_OSC_BYPASS_EXT | \
+ SPEAR1310_MIPHY_DUAL_PLL_RATIO_TOP(25) | \
+ SPEAR1310_MIPHY_SINGLE_OSC_BYPASS_EXT | \
+ SPEAR1310_MIPHY_SINGLE_PLL_RATIO_TOP(25))
+
+#define SPEAR1310_PCIE_MIPHY_CFG_2 0x3AC
+
+enum spear1310_miphy_mode {
+ SATA,
+ PCIE,
+};
+
+struct spear1310_miphy_priv {
+ /* instance id of this phy */
+ u32 id;
+ /* phy mode: 0 for SATA, 1 for PCIe */
+ enum spear1310_miphy_mode mode;
+ /* regmap for any soc specific misc registers */
+ struct regmap *misc;
+ /* phy struct pointer */
+ struct phy *phy;
+};
+
+static int spear1310_miphy_pcie_init(struct spear1310_miphy_priv *priv)
+{
+ u32 val;
+
+ regmap_update_bits(priv->misc, SPEAR1310_PCIE_MIPHY_CFG_1,
+ SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK,
+ SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE);
+
+ switch (priv->id) {
+ case 0:
+ val = SPEAR1310_PCIE_CFG_VAL(0);
+ break;
+ case 1:
+ val = SPEAR1310_PCIE_CFG_VAL(1);
+ break;
+ case 2:
+ val = SPEAR1310_PCIE_CFG_VAL(2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(priv->misc, SPEAR1310_PCIE_SATA_CFG,
+ SPEAR1310_PCIE_CFG_MASK(priv->id), val);
+
+ return 0;
+}
+
+static int spear1310_miphy_pcie_exit(struct spear1310_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1310_PCIE_SATA_CFG,
+ SPEAR1310_PCIE_CFG_MASK(priv->id), 0);
+
+ regmap_update_bits(priv->misc, SPEAR1310_PCIE_MIPHY_CFG_1,
+ SPEAR1310_PCIE_SATA_MIPHY_CFG_PCIE_MASK, 0);
+
+ return 0;
+}
+
+static int spear1310_miphy_init(struct phy *phy)
+{
+ struct spear1310_miphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->mode == PCIE)
+ ret = spear1310_miphy_pcie_init(priv);
+
+ return ret;
+}
+
+static int spear1310_miphy_exit(struct phy *phy)
+{
+ struct spear1310_miphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->mode == PCIE)
+ ret = spear1310_miphy_pcie_exit(priv);
+
+ return ret;
+}
+
+static const struct of_device_id spear1310_miphy_of_match[] = {
+ { .compatible = "st,spear1310-miphy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spear1310_miphy_of_match);
+
+static struct phy_ops spear1310_miphy_ops = {
+ .init = spear1310_miphy_init,
+ .exit = spear1310_miphy_exit,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *spear1310_miphy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct spear1310_miphy_priv *priv = dev_get_drvdata(dev);
+
+ if (args->args_count < 1) {
+ dev_err(dev, "DT did not pass correct no of args\n");
+ return NULL;
+ }
+
+ priv->mode = args->args[0];
+
+ if (priv->mode != SATA && priv->mode != PCIE) {
+ dev_err(dev, "DT did not pass correct phy mode\n");
+ return NULL;
+ }
+
+ return priv->phy;
+}
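The xlate callback above reads the MiPHY mode from the first phandle argument, so a hypothetical consumer driver (the node contents and the "pcie-phy" con_id below are illustrative, not defined by this patch) would obtain and initialise the PHY roughly like this:

	/* DT (illustrative): phys = <&miphy 1>; phy-names = "pcie-phy";  1 == PCIE */
	struct phy *phy = devm_phy_get(dev, "pcie-phy");
	int ret;

	if (IS_ERR(phy))
		return PTR_ERR(phy);
	ret = phy_init(phy);		/* reaches spear1310_miphy_pcie_init() */
	if (ret)
		return ret;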
+
+static int spear1310_miphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spear1310_miphy_priv *priv;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "can't alloc spear1310_miphy private date memory\n");
+ return -ENOMEM;
+ }
+
+ priv->misc =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "misc");
+ if (IS_ERR(priv->misc)) {
+ dev_err(dev, "failed to find misc regmap\n");
+ return PTR_ERR(priv->misc);
+ }
+
+ if (of_property_read_u32(dev->of_node, "phy-id", &priv->id)) {
+ dev_err(dev, "failed to find phy id\n");
+ return -EINVAL;
+ }
+
+ priv->phy = devm_phy_create(dev, NULL, &spear1310_miphy_ops, NULL);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "failed to create SATA PCIe PHY\n");
+ return PTR_ERR(priv->phy);
+ }
+
+ dev_set_drvdata(dev, priv);
+ phy_set_drvdata(priv->phy, priv);
+
+ phy_provider =
+ devm_of_phy_provider_register(dev, spear1310_miphy_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "failed to register phy provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static struct platform_driver spear1310_miphy_driver = {
+ .probe = spear1310_miphy_probe,
+ .driver = {
+ .name = "spear1310-miphy",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spear1310_miphy_of_match),
+ },
+};
+
+static int __init spear1310_miphy_phy_init(void)
+{
+ return platform_driver_register(&spear1310_miphy_driver);
+}
+module_init(spear1310_miphy_phy_init);
+
+static void __exit spear1310_miphy_phy_exit(void)
+{
+ platform_driver_unregister(&spear1310_miphy_driver);
+}
+module_exit(spear1310_miphy_phy_exit);
+
+MODULE_DESCRIPTION("ST SPEAR1310-MIPHY driver");
+MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-spear1340-miphy.c b/drivers/phy/phy-spear1340-miphy.c
new file mode 100644
index 000000000000..7135ba2603b6
--- /dev/null
+++ b/drivers/phy/phy-spear1340-miphy.c
@@ -0,0 +1,307 @@
+/*
+ * ST spear1340-miphy driver
+ *
+ * Copyright (C) 2014 ST Microelectronics
+ * Pratyush Anand <pratyush.anand@st.com>
+ * Mohit Kumar <mohit.kumar@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+
+/* SPEAr1340 Registers */
+/* Power Management Registers */
+#define SPEAR1340_PCM_CFG 0x100
+ #define SPEAR1340_PCM_CFG_SATA_POWER_EN BIT(11)
+#define SPEAR1340_PCM_WKUP_CFG 0x104
+#define SPEAR1340_SWITCH_CTR 0x108
+
+#define SPEAR1340_PERIP1_SW_RST 0x318
+ #define SPEAR1340_PERIP1_SW_RSATA BIT(12)
+#define SPEAR1340_PERIP2_SW_RST 0x31C
+#define SPEAR1340_PERIP3_SW_RST 0x320
+
+/* PCIE - SATA configuration registers */
+#define SPEAR1340_PCIE_SATA_CFG 0x424
+ /* PCIE CFG Masks */
+ #define SPEAR1340_PCIE_CFG_DEVICE_PRESENT BIT(11)
+ #define SPEAR1340_PCIE_CFG_POWERUP_RESET BIT(10)
+ #define SPEAR1340_PCIE_CFG_CORE_CLK_EN BIT(9)
+ #define SPEAR1340_PCIE_CFG_AUX_CLK_EN BIT(8)
+ #define SPEAR1340_SATA_CFG_TX_CLK_EN BIT(4)
+ #define SPEAR1340_SATA_CFG_RX_CLK_EN BIT(3)
+ #define SPEAR1340_SATA_CFG_POWERUP_RESET BIT(2)
+ #define SPEAR1340_SATA_CFG_PM_CLK_EN BIT(1)
+ #define SPEAR1340_PCIE_SATA_SEL_PCIE (0)
+ #define SPEAR1340_PCIE_SATA_SEL_SATA (1)
+ #define SPEAR1340_PCIE_SATA_CFG_MASK 0xF1F
+ #define SPEAR1340_PCIE_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_PCIE | \
+ SPEAR1340_PCIE_CFG_AUX_CLK_EN | \
+ SPEAR1340_PCIE_CFG_CORE_CLK_EN | \
+ SPEAR1340_PCIE_CFG_POWERUP_RESET | \
+ SPEAR1340_PCIE_CFG_DEVICE_PRESENT)
+ #define SPEAR1340_SATA_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_SATA | \
+ SPEAR1340_SATA_CFG_PM_CLK_EN | \
+ SPEAR1340_SATA_CFG_POWERUP_RESET | \
+ SPEAR1340_SATA_CFG_RX_CLK_EN | \
+ SPEAR1340_SATA_CFG_TX_CLK_EN)
+
+#define SPEAR1340_PCIE_MIPHY_CFG 0x428
+ #define SPEAR1340_MIPHY_OSC_BYPASS_EXT BIT(31)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV2 BIT(27)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV4 (2 << 27)
+ #define SPEAR1340_MIPHY_CLK_REF_DIV8 (3 << 27)
+ #define SPEAR1340_MIPHY_PLL_RATIO_TOP(x) (x << 0)
+ #define SPEAR1340_PCIE_MIPHY_CFG_MASK 0xF80000FF
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA \
+ (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+ SPEAR1340_MIPHY_CLK_REF_DIV2 | \
+ SPEAR1340_MIPHY_PLL_RATIO_TOP(60))
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
+ (SPEAR1340_MIPHY_PLL_RATIO_TOP(120))
+ #define SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE \
+ (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
+ SPEAR1340_MIPHY_PLL_RATIO_TOP(25))
+
+enum spear1340_miphy_mode {
+ SATA,
+ PCIE,
+};
+
+struct spear1340_miphy_priv {
+ /* phy mode: 0 for SATA, 1 for PCIe */
+ enum spear1340_miphy_mode mode;
+ /* regmap for any soc specific misc registers */
+ struct regmap *misc;
+ /* phy struct pointer */
+ struct phy *phy;
+};
+
+static int spear1340_miphy_sata_init(struct spear1340_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
+ SPEAR1340_PCIE_SATA_CFG_MASK,
+ SPEAR1340_SATA_CFG_VAL);
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
+ SPEAR1340_PCIE_MIPHY_CFG_MASK,
+ SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK);
+ /* Switch on sata power domain */
+ regmap_update_bits(priv->misc, SPEAR1340_PCM_CFG,
+ SPEAR1340_PCM_CFG_SATA_POWER_EN,
+ SPEAR1340_PCM_CFG_SATA_POWER_EN);
+ /* Wait for SATA power domain on */
+ msleep(20);
+
+ /* Disable PCIE SATA Controller reset */
+ regmap_update_bits(priv->misc, SPEAR1340_PERIP1_SW_RST,
+ SPEAR1340_PERIP1_SW_RSATA, 0);
+ /* Wait for SATA reset de-assert completion */
+ msleep(20);
+
+ return 0;
+}
+
+static int spear1340_miphy_sata_exit(struct spear1340_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
+ SPEAR1340_PCIE_SATA_CFG_MASK, 0);
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
+ SPEAR1340_PCIE_MIPHY_CFG_MASK, 0);
+
+ /* Enable PCIE SATA Controller reset */
+ regmap_update_bits(priv->misc, SPEAR1340_PERIP1_SW_RST,
+ SPEAR1340_PERIP1_SW_RSATA,
+ SPEAR1340_PERIP1_SW_RSATA);
+ /* Wait for SATA power domain off */
+ msleep(20);
+ /* Switch off sata power domain */
+ regmap_update_bits(priv->misc, SPEAR1340_PCM_CFG,
+ SPEAR1340_PCM_CFG_SATA_POWER_EN, 0);
+ /* Wait for SATA reset assert completion */
+ msleep(20);
+
+ return 0;
+}
+
+static int spear1340_miphy_pcie_init(struct spear1340_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
+ SPEAR1340_PCIE_MIPHY_CFG_MASK,
+ SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE);
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
+ SPEAR1340_PCIE_SATA_CFG_MASK,
+ SPEAR1340_PCIE_CFG_VAL);
+
+ return 0;
+}
+
+static int spear1340_miphy_pcie_exit(struct spear1340_miphy_priv *priv)
+{
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_MIPHY_CFG,
+ SPEAR1340_PCIE_MIPHY_CFG_MASK, 0);
+ regmap_update_bits(priv->misc, SPEAR1340_PCIE_SATA_CFG,
+ SPEAR1340_PCIE_SATA_CFG_MASK, 0);
+
+ return 0;
+}
+
+static int spear1340_miphy_init(struct phy *phy)
+{
+ struct spear1340_miphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->mode == SATA)
+ ret = spear1340_miphy_sata_init(priv);
+ else if (priv->mode == PCIE)
+ ret = spear1340_miphy_pcie_init(priv);
+
+ return ret;
+}
+
+static int spear1340_miphy_exit(struct phy *phy)
+{
+ struct spear1340_miphy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->mode == SATA)
+ ret = spear1340_miphy_sata_exit(priv);
+ else if (priv->mode == PCIE)
+ ret = spear1340_miphy_pcie_exit(priv);
+
+ return ret;
+}
+
+static const struct of_device_id spear1340_miphy_of_match[] = {
+ { .compatible = "st,spear1340-miphy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spear1340_miphy_of_match);
+
+static struct phy_ops spear1340_miphy_ops = {
+ .init = spear1340_miphy_init,
+ .exit = spear1340_miphy_exit,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int spear1340_miphy_suspend(struct device *dev)
+{
+ struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (priv->mode == SATA)
+ ret = spear1340_miphy_sata_exit(priv);
+
+ return ret;
+}
+
+static int spear1340_miphy_resume(struct device *dev)
+{
+ struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (priv->mode == SATA)
+ ret = spear1340_miphy_sata_init(priv);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(spear1340_miphy_pm_ops, spear1340_miphy_suspend,
+ spear1340_miphy_resume);
+
+static struct phy *spear1340_miphy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct spear1340_miphy_priv *priv = dev_get_drvdata(dev);
+
+ if (args->args_count < 1) {
+ dev_err(dev, "DT did not pass correct no of args\n");
+ return NULL;
+ }
+
+ priv->mode = args->args[0];
+
+ if (priv->mode != SATA && priv->mode != PCIE) {
+ dev_err(dev, "DT did not pass correct phy mode\n");
+ return NULL;
+ }
+
+ return priv->phy;
+}
+
+static int spear1340_miphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spear1340_miphy_priv *priv;
+ struct phy_provider *phy_provider;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "can't alloc spear1340_miphy private date memory\n");
+ return -ENOMEM;
+ }
+
+ priv->misc =
+ syscon_regmap_lookup_by_phandle(dev->of_node, "misc");
+ if (IS_ERR(priv->misc)) {
+ dev_err(dev, "failed to find misc regmap\n");
+ return PTR_ERR(priv->misc);
+ }
+
+ priv->phy = devm_phy_create(dev, NULL, &spear1340_miphy_ops, NULL);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "failed to create SATA PCIe PHY\n");
+ return PTR_ERR(priv->phy);
+ }
+
+ dev_set_drvdata(dev, priv);
+ phy_set_drvdata(priv->phy, priv);
+
+ phy_provider =
+ devm_of_phy_provider_register(dev, spear1340_miphy_xlate);
+ if (IS_ERR(phy_provider)) {
+ dev_err(dev, "failed to register phy provider\n");
+ return PTR_ERR(phy_provider);
+ }
+
+ return 0;
+}
+
+static struct platform_driver spear1340_miphy_driver = {
+ .probe = spear1340_miphy_probe,
+ .driver = {
+ .name = "spear1340-miphy",
+ .owner = THIS_MODULE,
+ .pm = &spear1340_miphy_pm_ops,
+ .of_match_table = of_match_ptr(spear1340_miphy_of_match),
+ },
+};
+
+static int __init spear1340_miphy_phy_init(void)
+{
+ return platform_driver_register(&spear1340_miphy_driver);
+}
+module_init(spear1340_miphy_phy_init);
+
+static void __exit spear1340_miphy_phy_exit(void)
+{
+ platform_driver_unregister(&spear1340_miphy_driver);
+}
+module_exit(spear1340_miphy_phy_exit);
+
+MODULE_DESCRIPTION("ST SPEAR1340-MIPHY driver");
+MODULE_AUTHOR("Pratyush Anand <pratyush.anand@st.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 0042ccb46b9a..bfd2c2e9f6cd 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -11,10 +11,10 @@ menu "Pin controllers"
depends on PINCTRL
config PINMUX
- bool "Support pin multiplexing controllers"
+ bool "Support pin multiplexing controllers" if COMPILE_TEST
config PINCONF
- bool "Support pin configuration controllers"
+ bool "Support pin configuration controllers" if COMPILE_TEST
config GENERIC_PINCONF
bool
@@ -26,29 +26,6 @@ config DEBUG_PINCTRL
help
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
-config PINCTRL_ABX500
- bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions"
- depends on AB8500_CORE
- select GENERIC_PINCONF
- help
- Select this to enable the ABx500 family IC GPIO driver
-
-config PINCTRL_AB8500
- bool "AB8500 pin controller driver"
- depends on PINCTRL_ABX500 && ARCH_U8500
-
-config PINCTRL_AB8540
- bool "AB8540 pin controller driver"
- depends on PINCTRL_ABX500 && ARCH_U8500
-
-config PINCTRL_AB9540
- bool "AB9540 pin controller driver"
- depends on PINCTRL_ABX500 && ARCH_U8500
-
-config PINCTRL_AB8505
- bool "AB8505 pin controller driver"
- depends on PINCTRL_ABX500 && ARCH_U8500
-
config PINCTRL_ADI2
bool "ADI pin controller driver"
depends on BLACKFIN
@@ -93,7 +70,7 @@ config PINCTRL_AT91
config PINCTRL_BAYTRAIL
bool "Intel Baytrail GPIO pin control"
depends on GPIOLIB && ACPI && X86
- select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
help
driver for memory mapped GPIO functionality on Intel Baytrail
platforms. Supports 3 banks with 102, 28 and 44 gpios.
@@ -130,6 +107,13 @@ config PINCTRL_IMX1_CORE
select PINMUX
select PINCONF
+config PINCTRL_IMX1
+ bool "IMX1 pinctrl driver"
+ depends on SOC_IMX1
+ select PINCTRL_IMX1_CORE
+ help
+ Say Y here to enable the imx1 pinctrl driver
+
config PINCTRL_IMX27
bool "IMX27 pinctrl driver"
depends on SOC_IMX27
@@ -226,58 +210,6 @@ config PINCTRL_IMX28
bool
select PINCTRL_MXS
-config PINCTRL_MSM
- bool
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB_IRQCHIP
-
-config PINCTRL_APQ8064
- tristate "Qualcomm APQ8064 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm TLMM block found in the Qualcomm APQ8064 platform.
-
-config PINCTRL_IPQ8064
- tristate "Qualcomm IPQ8064 pin controller driver"
- depends on GPIOLIB && OF
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm TLMM block found in the Qualcomm IPQ8064 platform.
-
-config PINCTRL_MSM8X74
- tristate "Qualcomm 8x74 pin controller driver"
- depends on GPIOLIB && OF && (ARCH_QCOM || COMPILE_TEST)
- select PINCTRL_MSM
- help
- This is the pinctrl, pinmux, pinconf and gpiolib driver for the
- Qualcomm TLMM block found in the Qualcomm 8974 platform.
-
-config PINCTRL_NOMADIK
- bool "Nomadik pin controller driver"
- depends on ARCH_U8500 || ARCH_NOMADIK
- select PINMUX
- select PINCONF
- select GPIOLIB
- select OF_GPIO
- select GPIOLIB_IRQCHIP
-
-config PINCTRL_STN8815
- bool "STN8815 pin controller driver"
- depends on PINCTRL_NOMADIK && ARCH_NOMADIK
-
-config PINCTRL_DB8500
- bool "DB8500 pin controller driver"
- depends on PINCTRL_NOMADIK && ARCH_U8500
-
-config PINCTRL_DB8540
- bool "DB8540 pin controller driver"
- depends on PINCTRL_NOMADIK && ARCH_U8500
-
config PINCTRL_ROCKCHIP
bool
select PINMUX
@@ -328,6 +260,12 @@ config PINCTRL_TEGRA124
bool
select PINCTRL_TEGRA
+config PINCTRL_TEGRA_XUSB
+ def_bool y if ARCH_TEGRA
+ select GENERIC_PHY
+ select PINCONF
+ select PINMUX
+
config PINCTRL_TZ1090
bool "Toumaz Xenif TZ1090 pin control driver"
depends on SOC_TZ1090
@@ -356,22 +294,6 @@ config PINCTRL_COH901
COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
ports of 8 GPIO pins each.
-config PINCTRL_SAMSUNG
- bool
- select PINMUX
- select PINCONF
-
-config PINCTRL_EXYNOS
- bool "Pinctrl driver data for Samsung EXYNOS SoCs other than 5440"
- depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
- select PINCTRL_SAMSUNG
-
-config PINCTRL_EXYNOS5440
- bool "Samsung EXYNOS5440 SoC pinctrl driver"
- depends on SOC_EXYNOS5440
- select PINMUX
- select PINCONF
-
config PINCTRL_PALMAS
bool "Pinctrl driver for the PALMAS Series MFD devices"
depends on OF && MFD_PALMAS
@@ -383,18 +305,11 @@ config PINCTRL_PALMAS
open drain configuration for the Palmas series devices like
TPS65913, TPS80036 etc.
-config PINCTRL_S3C24XX
- bool "Samsung S3C24XX SoC pinctrl driver"
- depends on ARCH_S3C24XX
- select PINCTRL_SAMSUNG
-
-config PINCTRL_S3C64XX
- bool "Samsung S3C64XX SoC pinctrl driver"
- depends on ARCH_S3C64XX
- select PINCTRL_SAMSUNG
-
source "drivers/pinctrl/berlin/Kconfig"
source "drivers/pinctrl/mvebu/Kconfig"
+source "drivers/pinctrl/nomadik/Kconfig"
+source "drivers/pinctrl/qcom/Kconfig"
+source "drivers/pinctrl/samsung/Kconfig"
source "drivers/pinctrl/sh-pfc/Kconfig"
source "drivers/pinctrl/spear/Kconfig"
source "drivers/pinctrl/sunxi/Kconfig"
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index c4b5d405b8f5..05d227508c95 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -9,11 +9,6 @@ ifeq ($(CONFIG_OF),y)
obj-$(CONFIG_PINCTRL) += devicetree.o
endif
obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
-obj-$(CONFIG_PINCTRL_ABX500) += pinctrl-abx500.o
-obj-$(CONFIG_PINCTRL_AB8500) += pinctrl-ab8500.o
-obj-$(CONFIG_PINCTRL_AB8540) += pinctrl-ab8540.o
-obj-$(CONFIG_PINCTRL_AB9540) += pinctrl-ab9540.o
-obj-$(CONFIG_PINCTRL_AB8505) += pinctrl-ab8505.o
obj-$(CONFIG_PINCTRL_ADI2) += pinctrl-adi2.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
obj-$(CONFIG_PINCTRL_BF54x) += pinctrl-adi2-bf54x.o
@@ -24,6 +19,7 @@ obj-$(CONFIG_PINCTRL_BAYTRAIL) += pinctrl-baytrail.o
obj-$(CONFIG_PINCTRL_BCM281XX) += pinctrl-bcm281xx.o
obj-$(CONFIG_PINCTRL_IMX) += pinctrl-imx.o
obj-$(CONFIG_PINCTRL_IMX1_CORE) += pinctrl-imx1-core.o
+obj-$(CONFIG_PINCTRL_IMX1) += pinctrl-imx1.o
obj-$(CONFIG_PINCTRL_IMX27) += pinctrl-imx27.o
obj-$(CONFIG_PINCTRL_IMX35) += pinctrl-imx35.o
obj-$(CONFIG_PINCTRL_IMX50) += pinctrl-imx50.o
@@ -38,14 +34,6 @@ obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o
obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o
-obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o
-obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o
-obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
-obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
-obj-$(CONFIG_PINCTRL_NOMADIK) += pinctrl-nomadik.o
-obj-$(CONFIG_PINCTRL_STN8815) += pinctrl-nomadik-stn8815.o
-obj-$(CONFIG_PINCTRL_DB8500) += pinctrl-nomadik-db8500.o
-obj-$(CONFIG_PINCTRL_DB8540) += pinctrl-nomadik-db8540.o
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
@@ -55,15 +43,11 @@ obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
obj-$(CONFIG_PINCTRL_TEGRA30) += pinctrl-tegra30.o
obj-$(CONFIG_PINCTRL_TEGRA114) += pinctrl-tegra114.o
obj-$(CONFIG_PINCTRL_TEGRA124) += pinctrl-tegra124.o
+obj-$(CONFIG_PINCTRL_TEGRA_XUSB) += pinctrl-tegra-xusb.o
obj-$(CONFIG_PINCTRL_TZ1090) += pinctrl-tz1090.o
obj-$(CONFIG_PINCTRL_TZ1090_PDC) += pinctrl-tz1090-pdc.o
obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
-obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
-obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
-obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o
-obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o
-obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o
obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
@@ -72,8 +56,11 @@ obj-$(CONFIG_PINCTRL_VF610) += pinctrl-vf610.o
obj-$(CONFIG_ARCH_BERLIN) += berlin/
obj-$(CONFIG_PLAT_ORION) += mvebu/
+obj-y += nomadik/
+obj-$(CONFIG_ARCH_QCOM) += qcom/
+obj-$(CONFIG_PLAT_SAMSUNG) += samsung/
obj-$(CONFIG_ARCH_SHMOBILE) += sh-pfc/
obj-$(CONFIG_SUPERH) += sh-pfc/
obj-$(CONFIG_PLAT_SPEAR) += spear/
-obj-$(CONFIG_ARCH_VT8500) += vt8500/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
+obj-$(CONFIG_ARCH_VT8500) += vt8500/
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e09474ecde23..e4f65510c87e 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -992,29 +992,15 @@ int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
if (p->state) {
/*
- * The set of groups with a mux configuration in the old state
- * may not be identical to the set of groups with a mux setting
- * in the new state. While this might be unusual, it's entirely
- * possible for the "user"-supplied mapping table to be written
- * that way. For each group that was configured in the old state
- * but not in the new state, this code puts that group into a
- * safe/disabled state.
+ * For each pinmux setting in the old state, forget SW's record
+ * of mux owner for that pingroup. Any pingroups which are
+ * still owned by the new state will be re-acquired by the call
+ * to pinmux_enable_setting() in the loop below.
*/
list_for_each_entry(setting, &p->state->settings, node) {
- bool found = false;
if (setting->type != PIN_MAP_TYPE_MUX_GROUP)
continue;
- list_for_each_entry(setting2, &state->settings, node) {
- if (setting2->type != PIN_MAP_TYPE_MUX_GROUP)
- continue;
- if (setting2->data.mux.group ==
- setting->data.mux.group) {
- found = true;
- break;
- }
- }
- if (!found)
- pinmux_disable_setting(setting);
+ pinmux_disable_setting(setting);
}
}
diff --git a/drivers/pinctrl/nomadik/Kconfig b/drivers/pinctrl/nomadik/Kconfig
new file mode 100644
index 000000000000..d48a5aa24a29
--- /dev/null
+++ b/drivers/pinctrl/nomadik/Kconfig
@@ -0,0 +1,51 @@
+if ARCH_U8500
+
+config PINCTRL_ABX500
+ bool "ST-Ericsson ABx500 family Mixed Signal Circuit gpio functions"
+ depends on AB8500_CORE
+ select GENERIC_PINCONF
+ help
+ Select this to enable the ABx500 family IC GPIO driver
+
+config PINCTRL_AB8500
+ bool "AB8500 pin controller driver"
+ depends on PINCTRL_ABX500 && ARCH_U8500
+
+config PINCTRL_AB8540
+ bool "AB8540 pin controller driver"
+ depends on PINCTRL_ABX500 && ARCH_U8500
+
+config PINCTRL_AB9540
+ bool "AB9540 pin controller driver"
+ depends on PINCTRL_ABX500 && ARCH_U8500
+
+config PINCTRL_AB8505
+ bool "AB8505 pin controller driver"
+ depends on PINCTRL_ABX500 && ARCH_U8500
+
+endif
+
+if (ARCH_U8500 || ARCH_NOMADIK)
+
+config PINCTRL_NOMADIK
+ bool "Nomadik pin controller driver"
+ depends on ARCH_U8500 || ARCH_NOMADIK
+ select PINMUX
+ select PINCONF
+ select GPIOLIB
+ select OF_GPIO
+ select GPIOLIB_IRQCHIP
+
+config PINCTRL_STN8815
+ bool "STN8815 pin controller driver"
+ depends on PINCTRL_NOMADIK && ARCH_NOMADIK
+
+config PINCTRL_DB8500
+ bool "DB8500 pin controller driver"
+ depends on PINCTRL_NOMADIK && ARCH_U8500
+
+config PINCTRL_DB8540
+ bool "DB8540 pin controller driver"
+ depends on PINCTRL_NOMADIK && ARCH_U8500
+
+endif
diff --git a/drivers/pinctrl/nomadik/Makefile b/drivers/pinctrl/nomadik/Makefile
new file mode 100644
index 000000000000..30b27f18cd52
--- /dev/null
+++ b/drivers/pinctrl/nomadik/Makefile
@@ -0,0 +1,10 @@
+# Nomadik family pin control drivers
+obj-$(CONFIG_PINCTRL_ABX500) += pinctrl-abx500.o
+obj-$(CONFIG_PINCTRL_AB8500) += pinctrl-ab8500.o
+obj-$(CONFIG_PINCTRL_AB8540) += pinctrl-ab8540.o
+obj-$(CONFIG_PINCTRL_AB9540) += pinctrl-ab9540.o
+obj-$(CONFIG_PINCTRL_AB8505) += pinctrl-ab8505.o
+obj-$(CONFIG_PINCTRL_NOMADIK) += pinctrl-nomadik.o
+obj-$(CONFIG_PINCTRL_STN8815) += pinctrl-nomadik-stn8815.o
+obj-$(CONFIG_PINCTRL_DB8500) += pinctrl-nomadik-db8500.o
+obj-$(CONFIG_PINCTRL_DB8540) += pinctrl-nomadik-db8540.o
diff --git a/drivers/pinctrl/pinctrl-ab8500.c b/drivers/pinctrl/nomadik/pinctrl-ab8500.c
index 2ac2d0ad3025..2ac2d0ad3025 100644
--- a/drivers/pinctrl/pinctrl-ab8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-ab8500.c
diff --git a/drivers/pinctrl/pinctrl-ab8505.c b/drivers/pinctrl/nomadik/pinctrl-ab8505.c
index bf0ef4ac376f..bf0ef4ac376f 100644
--- a/drivers/pinctrl/pinctrl-ab8505.c
+++ b/drivers/pinctrl/nomadik/pinctrl-ab8505.c
diff --git a/drivers/pinctrl/pinctrl-ab8540.c b/drivers/pinctrl/nomadik/pinctrl-ab8540.c
index 9867535d49c1..9867535d49c1 100644
--- a/drivers/pinctrl/pinctrl-ab8540.c
+++ b/drivers/pinctrl/nomadik/pinctrl-ab8540.c
diff --git a/drivers/pinctrl/pinctrl-ab9540.c b/drivers/pinctrl/nomadik/pinctrl-ab9540.c
index 1a281ca95dac..1a281ca95dac 100644
--- a/drivers/pinctrl/pinctrl-ab9540.c
+++ b/drivers/pinctrl/nomadik/pinctrl-ab9540.c
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 163da9c3ea0e..a53a689a2bfa 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -32,8 +32,8 @@
#include <linux/pinctrl/machine.h>
#include "pinctrl-abx500.h"
-#include "core.h"
-#include "pinconf.h"
+#include "../core.h"
+#include "../pinconf.h"
/*
* The AB9540 and AB8540 GPIO support are extended versions
@@ -737,20 +737,6 @@ static int abx500_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
return ret;
}
-static void abx500_pmx_disable(struct pinctrl_dev *pctldev,
- unsigned function, unsigned group)
-{
- struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
- const struct abx500_pingroup *g;
-
- g = &pct->soc->groups[group];
- if (g->altsetting < 0)
- return;
-
- /* FIXME: poke out the mux, set the pin to some default state? */
- dev_dbg(pct->dev, "disable group %s, %u pins\n", g->name, g->npins);
-}
-
static int abx500_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset)
@@ -799,7 +785,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
.get_function_name = abx500_pmx_get_func_name,
.get_function_groups = abx500_pmx_get_func_groups,
.enable = abx500_pmx_enable,
- .disable = abx500_pmx_disable,
.gpio_request_enable = abx500_gpio_request_enable,
.gpio_disable_free = abx500_gpio_disable_free,
};
diff --git a/drivers/pinctrl/pinctrl-abx500.h b/drivers/pinctrl/nomadik/pinctrl-abx500.h
index 2beef3bfe9ca..2beef3bfe9ca 100644
--- a/drivers/pinctrl/pinctrl-abx500.h
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.h
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
index c74840729648..c74840729648 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8540.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
index d7ba5443bae0..d7ba5443bae0 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8540.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-db8540.c
diff --git a/drivers/pinctrl/pinctrl-nomadik-stn8815.c b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
index ed39dcafd4f8..ed39dcafd4f8 100644
--- a/drivers/pinctrl/pinctrl-nomadik-stn8815.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 8f6f16ef73f3..e7cab07eef47 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -31,7 +31,7 @@
/* Since we request GPIOs from ourself */
#include <linux/pinctrl/consumer.h>
#include "pinctrl-nomadik.h"
-#include "core.h"
+#include "../core.h"
/*
* The GPIO module in the Nomadik family of Systems-on-Chip is an
@@ -1765,21 +1765,6 @@ out_glitch:
return ret;
}
-static void nmk_pmx_disable(struct pinctrl_dev *pctldev,
- unsigned function, unsigned group)
-{
- struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
- const struct nmk_pingroup *g;
-
- g = &npct->soc->groups[group];
-
- if (g->altsetting < 0)
- return;
-
- /* Poke out the mux, set the pin to some default state? */
- dev_dbg(npct->dev, "disable group %s, %u pins\n", g->name, g->npins);
-}
-
static int nmk_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset)
@@ -1826,7 +1811,6 @@ static const struct pinmux_ops nmk_pinmux_ops = {
.get_function_name = nmk_pmx_get_func_name,
.get_function_groups = nmk_pmx_get_func_groups,
.enable = nmk_pmx_enable,
- .disable = nmk_pmx_disable,
.gpio_request_enable = nmk_gpio_request_enable,
.gpio_disable_free = nmk_gpio_disable_free,
};
diff --git a/drivers/pinctrl/pinctrl-nomadik.h b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
index d8215f1e70c7..d8215f1e70c7 100644
--- a/drivers/pinctrl/pinctrl-nomadik.h
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.h
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index 5c44feb54ebb..b092b93c67a1 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -401,7 +401,7 @@ static int adi_gpio_irq_type(struct irq_data *d, unsigned int type)
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
- snprintf(buf, 16, "gpio-irq%d", irq);
+ snprintf(buf, 16, "gpio-irq%u", irq);
port_setup(port, d->hwirq, true);
} else
goto out;
@@ -652,35 +652,6 @@ static int adi_pinmux_enable(struct pinctrl_dev *pctldev, unsigned func_id,
return 0;
}
-static void adi_pinmux_disable(struct pinctrl_dev *pctldev, unsigned func_id,
- unsigned group_id)
-{
- struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
- struct gpio_port *port;
- struct pinctrl_gpio_range *range;
- unsigned long flags;
- unsigned short *mux, pin;
-
- mux = (unsigned short *)pinctrl->soc->groups[group_id].mux;
-
- while (*mux) {
- pin = P_IDENT(*mux);
-
- range = pinctrl_find_gpio_range_from_pin(pctldev, pin);
- if (range == NULL) /* should not happen */
- return;
-
- port = container_of(range->gc, struct gpio_port, chip);
-
- spin_lock_irqsave(&port->lock, flags);
-
- port_setup(port, pin_to_offset(range, pin), true);
- mux++;
-
- spin_unlock_irqrestore(&port->lock, flags);
- }
-}
-
static int adi_pinmux_get_funcs_count(struct pinctrl_dev *pctldev)
{
struct adi_pinctrl *pinctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -728,7 +699,6 @@ static int adi_pinmux_request_gpio(struct pinctrl_dev *pctldev,
static struct pinmux_ops adi_pinmux_ops = {
.enable = adi_pinmux_enable,
- .disable = adi_pinmux_disable,
.get_functions_count = adi_pinmux_get_funcs_count,
.get_function_name = adi_pinmux_get_func_name,
.get_function_groups = adi_pinmux_get_groups,
@@ -979,7 +949,7 @@ static int adi_gpio_probe(struct platform_device *pdev)
struct gpio_port *port;
char pinctrl_devname[DEVNAME_SIZE];
static int gpio;
- int ret = 0, ret1;
+ int ret = 0;
pdata = dev->platform_data;
if (!pdata)
@@ -1057,7 +1027,7 @@ static int adi_gpio_probe(struct platform_device *pdev)
return 0;
out_remove_gpiochip:
- ret1 = gpiochip_remove(&port->chip);
+ gpiochip_remove(&port->chip);
out_remove_domain:
if (port->pint)
irq_domain_remove(port->domain);
@@ -1068,12 +1038,11 @@ out_remove_domain:
static int adi_gpio_remove(struct platform_device *pdev)
{
struct gpio_port *port = platform_get_drvdata(pdev);
- int ret;
u8 offset;
list_del(&port->node);
gpiochip_remove_pin_ranges(&port->chip);
- ret = gpiochip_remove(&port->chip);
+ gpiochip_remove(&port->chip);
if (port->pint) {
for (offset = 0; offset < port->width; offset++)
irq_dispose_mapping(irq_find_mapping(port->domain,
@@ -1081,7 +1050,7 @@ static int adi_gpio_remove(struct platform_device *pdev)
irq_domain_remove(port->domain);
}
- return ret;
+ return 0;
}
static int adi_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index c862f9c0e9ce..0e4ec91f4d49 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -565,7 +565,6 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
{
struct as3722_pctrl_info *as_pci;
int ret;
- int tret;
as_pci = devm_kzalloc(&pdev->dev, sizeof(*as_pci), GFP_KERNEL);
if (!as_pci)
@@ -611,10 +610,7 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
return 0;
fail_range_add:
- tret = gpiochip_remove(&as_pci->gpio_chip);
- if (tret < 0)
- dev_warn(&pdev->dev, "Couldn't remove gpio chip, %d\n", tret);
-
+ gpiochip_remove(&as_pci->gpio_chip);
fail_chip_add:
pinctrl_unregister(as_pci->pctl);
return ret;
@@ -623,11 +619,8 @@ fail_chip_add:
static int as3722_pinctrl_remove(struct platform_device *pdev)
{
struct as3722_pctrl_info *as_pci = platform_get_drvdata(pdev);
- int ret;
- ret = gpiochip_remove(&as_pci->gpio_chip);
- if (ret < 0)
- return ret;
+ gpiochip_remove(&as_pci->gpio_chip);
pinctrl_unregister(as_pci->pctl);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 421493cb490c..af1ba4fc150d 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -611,26 +611,6 @@ static int at91_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-static void at91_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
-{
- struct at91_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
- const struct at91_pmx_pin *pins_conf = info->groups[group].pins_conf;
- const struct at91_pmx_pin *pin;
- uint32_t npins = info->groups[group].npins;
- int i;
- unsigned mask;
- void __iomem *pio;
-
- for (i = 0; i < npins; i++) {
- pin = &pins_conf[i];
- at91_pin_dbg(info->dev, pin);
- pio = pin_to_controller(info, pin->bank);
- mask = pin_to_mask(pin->pin);
- at91_mux_gpio_enable(pio, mask, 1);
- }
-}
-
static int at91_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
{
struct at91_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
@@ -705,7 +685,6 @@ static const struct pinmux_ops at91_pmx_ops = {
.get_function_name = at91_pmx_get_func_name,
.get_function_groups = at91_pmx_get_groups,
.enable = at91_pmx_enable,
- .disable = at91_pmx_disable,
.gpio_request_enable = at91_gpio_request_enable,
.gpio_disable_free = at91_gpio_disable_free,
};
@@ -793,9 +772,9 @@ static void at91_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
unsigned long config;
- int ret, val, num_conf = 0;
+ int val, num_conf = 0;
- ret = at91_pinconf_get(pctldev, pin_id, &config);
+ at91_pinconf_get(pctldev, pin_id, &config);
DBG_SHOW_FLAG(MULTI_DRIVE);
DBG_SHOW_FLAG(PULL_UP);
@@ -945,7 +924,7 @@ static int at91_pinctrl_parse_functions(struct device_node *np,
/* Initialise function */
func->name = np->name;
func->ngroups = of_get_child_count(np);
- if (func->ngroups <= 0) {
+ if (func->ngroups == 0) {
dev_err(info->dev, "no groups defined\n");
return -EINVAL;
}
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 975572e2f260..9ca59a018743 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -25,9 +25,7 @@
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include <linux/gpio.h>
-#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
@@ -44,6 +42,7 @@
/* BYT_CONF0_REG register bits */
#define BYT_IODEN BIT(31)
+#define BYT_DIRECT_IRQ_EN BIT(27)
#define BYT_TRIG_NEG BIT(26)
#define BYT_TRIG_POS BIT(25)
#define BYT_TRIG_LVL BIT(24)
@@ -137,7 +136,6 @@ static struct pinctrl_gpio_range byt_ranges[] = {
struct byt_gpio {
struct gpio_chip chip;
- struct irq_domain *domain;
struct platform_device *pdev;
spinlock_t lock;
void __iomem *reg_base;
@@ -217,7 +215,7 @@ static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
static int byt_irq_type(struct irq_data *d, unsigned type)
{
- struct byt_gpio *vg = irq_data_get_irq_chip_data(d);
+ struct byt_gpio *vg = to_byt_gpio(irq_data_get_irq_chip_data(d));
u32 offset = irqd_to_hwirq(d);
u32 value;
unsigned long flags;
@@ -303,12 +301,22 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
struct byt_gpio *vg = to_byt_gpio(chip);
+ void __iomem *conf_reg = byt_gpio_reg(chip, gpio, BYT_CONF0_REG);
void __iomem *reg = byt_gpio_reg(chip, gpio, BYT_VAL_REG);
unsigned long flags;
u32 reg_val;
spin_lock_irqsave(&vg->lock, flags);
+ /*
+ * Before making any direction modifications, do a check if gpio
+ * is set for direct IRQ. On baytrail, setting GPIO to output does
+ * not make sense, so let's at least warn the caller before they shoot
+ * themselves in the foot.
+ */
+ WARN(readl(conf_reg) & BYT_DIRECT_IRQ_EN,
+ "Potential Error: Setting GPIO with direct_irq_en to output");
+
reg_val = readl(reg) | BYT_DIR_MASK;
reg_val &= ~BYT_OUTPUT_EN;
@@ -393,16 +401,10 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
spin_unlock_irqrestore(&vg->lock, flags);
}
-static int byt_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct byt_gpio *vg = to_byt_gpio(chip);
- return irq_create_mapping(vg->domain, offset);
-}
-
static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
- struct byt_gpio *vg = irq_data_get_irq_handler_data(data);
+ struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
struct irq_chip *chip = irq_data_get_irq_chip(data);
u32 base, pin, mask;
void __iomem *reg;
@@ -421,7 +423,7 @@ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
/* Clear before handling so we can't lose an edge */
writel(mask, reg);
- virq = irq_find_mapping(vg->domain, base + pin);
+ virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
generic_handle_irq(virq);
/* In case bios or user sets triggering incorrectly a pin
@@ -454,33 +456,11 @@ static void byt_irq_mask(struct irq_data *d)
{
}
-static int byt_irq_reqres(struct irq_data *d)
-{
- struct byt_gpio *vg = irq_data_get_irq_chip_data(d);
-
- if (gpio_lock_as_irq(&vg->chip, irqd_to_hwirq(d))) {
- dev_err(vg->chip.dev,
- "unable to lock HW IRQ %lu for IRQ\n",
- irqd_to_hwirq(d));
- return -EINVAL;
- }
- return 0;
-}
-
-static void byt_irq_relres(struct irq_data *d)
-{
- struct byt_gpio *vg = irq_data_get_irq_chip_data(d);
-
- gpio_unlock_as_irq(&vg->chip, irqd_to_hwirq(d));
-}
-
static struct irq_chip byt_irqchip = {
.name = "BYT-GPIO",
.irq_mask = byt_irq_mask,
.irq_unmask = byt_irq_unmask,
.irq_set_type = byt_irq_type,
- .irq_request_resources = byt_irq_reqres,
- .irq_release_resources = byt_irq_relres,
};
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
@@ -501,23 +481,6 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
}
}
-static int byt_gpio_irq_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct byt_gpio *vg = d->host_data;
-
- irq_set_chip_and_handler_name(virq, &byt_irqchip, handle_simple_irq,
- "demux");
- irq_set_chip_data(virq, vg);
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
-
- return 0;
-}
-
-static const struct irq_domain_ops byt_gpio_irq_ops = {
- .map = byt_gpio_irq_map,
-};
-
static int byt_gpio_probe(struct platform_device *pdev)
{
struct byt_gpio *vg;
@@ -527,7 +490,6 @@ static int byt_gpio_probe(struct platform_device *pdev)
struct acpi_device *acpi_dev;
struct pinctrl_gpio_range *range;
acpi_handle handle = ACPI_HANDLE(dev);
- unsigned hwirq;
int ret;
if (acpi_bus_get_device(handle, &acpi_dev))
@@ -574,27 +536,27 @@ static int byt_gpio_probe(struct platform_device *pdev)
gc->can_sleep = false;
gc->dev = dev;
+ ret = gpiochip_add(gc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
+ return ret;
+ }
+
/* set up interrupts */
irq_rc = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_rc && irq_rc->start) {
- hwirq = irq_rc->start;
- gc->to_irq = byt_gpio_to_irq;
-
- vg->domain = irq_domain_add_linear(NULL, gc->ngpio,
- &byt_gpio_irq_ops, vg);
- if (!vg->domain)
- return -ENXIO;
-
byt_gpio_irq_init_hw(vg);
+ ret = gpiochip_irqchip_add(gc, &byt_irqchip, 0,
+ handle_simple_irq, IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(dev, "failed to add irqchip\n");
+ gpiochip_remove(gc);
+ return ret;
+ }
- irq_set_handler_data(hwirq, vg);
- irq_set_chained_handler(hwirq, byt_gpio_irq_handler);
- }
-
- ret = gpiochip_add(gc);
- if (ret) {
- dev_err(&pdev->dev, "failed adding byt-gpio chip\n");
- return ret;
+ gpiochip_set_chained_irqchip(gc, &byt_irqchip,
+ (unsigned)irq_rc->start,
+ byt_gpio_irq_handler);
}
pm_runtime_enable(dev);
@@ -627,12 +589,9 @@ MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
static int byt_gpio_remove(struct platform_device *pdev)
{
struct byt_gpio *vg = platform_get_drvdata(pdev);
- int err;
pm_runtime_disable(&pdev->dev);
- err = gpiochip_remove(&vg->chip);
- if (err)
- dev_warn(&pdev->dev, "failed to remove gpio_chip.\n");
+ gpiochip_remove(&vg->chip);
return 0;
}
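
Two things happen in the baytrail changes above: the driver drops its private irq_domain in favour of the gpiolib irqchip helpers (gpiochip_irqchip_add() and gpiochip_set_chained_irqchip(), called only after gpiochip_add() has succeeded), and byt_gpio_direction_output() now warns when the pad's CONF0 register has the direct-IRQ bit set. A minimal userspace sketch of that last check, reusing the BYT_DIRECT_IRQ_EN bit position from the hunk above; the register value here is a made-up sample, not read from hardware:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)            (1u << (n))
#define BYT_DIRECT_IRQ_EN BIT(27)   /* same bit as in BYT_CONF0_REG above */

/* Return non-zero if switching this pad to output deserves a warning. */
static int direct_irq_conflict(uint32_t conf0)
{
	return (conf0 & BYT_DIRECT_IRQ_EN) != 0;
}

int main(void)
{
	uint32_t conf0 = BIT(27) | BIT(2);   /* sample CONF0 contents */

	if (direct_irq_conflict(conf0))
		printf("Potential Error: Setting GPIO with direct_irq_en to output\n");

	return 0;
}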
diff --git a/drivers/pinctrl/pinctrl-bcm281xx.c b/drivers/pinctrl/pinctrl-bcm281xx.c
index 3bed792b2c03..c5ca9e633fff 100644
--- a/drivers/pinctrl/pinctrl-bcm281xx.c
+++ b/drivers/pinctrl/pinctrl-bcm281xx.c
@@ -1396,7 +1396,7 @@ static struct pinctrl_desc bcm281xx_pinctrl_desc = {
.owner = THIS_MODULE,
};
-int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
+static int __init bcm281xx_pinctrl_probe(struct platform_device *pdev)
{
struct bcm281xx_pinctrl_data *pdata = &bcm281xx_pinctrl;
struct resource *res;
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
index 3d907de9bc91..5bcfd7ace0cd 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/pinctrl-bcm2835.c
@@ -841,16 +841,6 @@ static int bcm2835_pmx_enable(struct pinctrl_dev *pctldev,
return 0;
}
-static void bcm2835_pmx_disable(struct pinctrl_dev *pctldev,
- unsigned func_selector,
- unsigned group_selector)
-{
- struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
-
- /* disable by setting to GPIO_IN */
- bcm2835_pinctrl_fsel_set(pc, group_selector, BCM2835_FSEL_GPIO_IN);
-}
-
static void bcm2835_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset)
@@ -880,7 +870,6 @@ static const struct pinmux_ops bcm2835_pmx_ops = {
.get_function_name = bcm2835_pmx_get_function_name,
.get_function_groups = bcm2835_pmx_get_function_groups,
.enable = bcm2835_pmx_enable,
- .disable = bcm2835_pmx_disable,
.gpio_disable_free = bcm2835_pmx_gpio_disable_free,
.gpio_set_direction = bcm2835_pmx_gpio_set_direction,
};
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index d182fdd2e715..29cbbab8c3a6 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -756,8 +756,7 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
err_no_range:
err_no_irqchip:
- if (gpiochip_remove(&gpio->chip))
- dev_err(&pdev->dev, "failed to remove gpio chip\n");
+ gpiochip_remove(&gpio->chip);
err_no_chip:
clk_disable_unprepare(gpio->clk);
dev_err(&pdev->dev, "module ERROR:%d\n", err);
@@ -767,16 +766,11 @@ err_no_chip:
static int __exit u300_gpio_remove(struct platform_device *pdev)
{
struct u300_gpio *gpio = platform_get_drvdata(pdev);
- int err;
/* Turn off the GPIO block */
writel(0x00000000U, gpio->base + U300_GPIO_CR);
- err = gpiochip_remove(&gpio->chip);
- if (err < 0) {
- dev_err(gpio->dev, "unable to remove gpiochip: %d\n", err);
- return err;
- }
+ gpiochip_remove(&gpio->chip);
clk_disable_unprepare(gpio->clk);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index a24448e5d399..946d594a64dd 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -515,7 +515,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
/* Initialise function */
func->name = np->name;
func->num_groups = of_get_child_count(np);
- if (func->num_groups <= 0) {
+ if (func->num_groups == 0) {
dev_err(info->dev, "no groups defined in %s\n", np->full_name);
return -EINVAL;
}
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c
index 815384b377b5..483420757c9f 100644
--- a/drivers/pinctrl/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/pinctrl-imx1-core.c
@@ -526,7 +526,7 @@ static int imx1_pinctrl_parse_functions(struct device_node *np,
/* Initialise function */
func->name = np->name;
func->num_groups = of_get_child_count(np);
- if (func->num_groups <= 0)
+ if (func->num_groups == 0)
return -EINVAL;
func->groups = devm_kzalloc(info->dev,
diff --git a/drivers/pinctrl/pinctrl-imx1.c b/drivers/pinctrl/pinctrl-imx1.c
new file mode 100644
index 000000000000..533a6e519648
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-imx1.c
@@ -0,0 +1,279 @@
+/*
+ * i.MX1 pinctrl driver based on imx pinmux core
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-imx1.h"
+
+#define PAD_ID(port, pin) ((port) * 32 + (pin))
+#define PA 0
+#define PB 1
+#define PC 2
+#define PD 3
+
+enum imx1_pads {
+ MX1_PAD_A24 = PAD_ID(PA, 0),
+ MX1_PAD_TIN = PAD_ID(PA, 1),
+ MX1_PAD_PWMO = PAD_ID(PA, 2),
+ MX1_PAD_CSI_MCLK = PAD_ID(PA, 3),
+ MX1_PAD_CSI_D0 = PAD_ID(PA, 4),
+ MX1_PAD_CSI_D1 = PAD_ID(PA, 5),
+ MX1_PAD_CSI_D2 = PAD_ID(PA, 6),
+ MX1_PAD_CSI_D3 = PAD_ID(PA, 7),
+ MX1_PAD_CSI_D4 = PAD_ID(PA, 8),
+ MX1_PAD_CSI_D5 = PAD_ID(PA, 9),
+ MX1_PAD_CSI_D6 = PAD_ID(PA, 10),
+ MX1_PAD_CSI_D7 = PAD_ID(PA, 11),
+ MX1_PAD_CSI_VSYNC = PAD_ID(PA, 12),
+ MX1_PAD_CSI_HSYNC = PAD_ID(PA, 13),
+ MX1_PAD_CSI_PIXCLK = PAD_ID(PA, 14),
+ MX1_PAD_I2C_SDA = PAD_ID(PA, 15),
+ MX1_PAD_I2C_SCL = PAD_ID(PA, 16),
+ MX1_PAD_DTACK = PAD_ID(PA, 17),
+ MX1_PAD_BCLK = PAD_ID(PA, 18),
+ MX1_PAD_LBA = PAD_ID(PA, 19),
+ MX1_PAD_ECB = PAD_ID(PA, 20),
+ MX1_PAD_A0 = PAD_ID(PA, 21),
+ MX1_PAD_CS4 = PAD_ID(PA, 22),
+ MX1_PAD_CS5 = PAD_ID(PA, 23),
+ MX1_PAD_A16 = PAD_ID(PA, 24),
+ MX1_PAD_A17 = PAD_ID(PA, 25),
+ MX1_PAD_A18 = PAD_ID(PA, 26),
+ MX1_PAD_A19 = PAD_ID(PA, 27),
+ MX1_PAD_A20 = PAD_ID(PA, 28),
+ MX1_PAD_A21 = PAD_ID(PA, 29),
+ MX1_PAD_A22 = PAD_ID(PA, 30),
+ MX1_PAD_A23 = PAD_ID(PA, 31),
+ MX1_PAD_SD_DAT0 = PAD_ID(PB, 8),
+ MX1_PAD_SD_DAT1 = PAD_ID(PB, 9),
+ MX1_PAD_SD_DAT2 = PAD_ID(PB, 10),
+ MX1_PAD_SD_DAT3 = PAD_ID(PB, 11),
+ MX1_PAD_SD_SCLK = PAD_ID(PB, 12),
+ MX1_PAD_SD_CMD = PAD_ID(PB, 13),
+ MX1_PAD_SIM_SVEN = PAD_ID(PB, 14),
+ MX1_PAD_SIM_PD = PAD_ID(PB, 15),
+ MX1_PAD_SIM_TX = PAD_ID(PB, 16),
+ MX1_PAD_SIM_RX = PAD_ID(PB, 17),
+ MX1_PAD_SIM_RST = PAD_ID(PB, 18),
+ MX1_PAD_SIM_CLK = PAD_ID(PB, 19),
+ MX1_PAD_USBD_AFE = PAD_ID(PB, 20),
+ MX1_PAD_USBD_OE = PAD_ID(PB, 21),
+ MX1_PAD_USBD_RCV = PAD_ID(PB, 22),
+ MX1_PAD_USBD_SUSPND = PAD_ID(PB, 23),
+ MX1_PAD_USBD_VP = PAD_ID(PB, 24),
+ MX1_PAD_USBD_VM = PAD_ID(PB, 25),
+ MX1_PAD_USBD_VPO = PAD_ID(PB, 26),
+ MX1_PAD_USBD_VMO = PAD_ID(PB, 27),
+ MX1_PAD_UART2_CTS = PAD_ID(PB, 28),
+ MX1_PAD_UART2_RTS = PAD_ID(PB, 29),
+ MX1_PAD_UART2_TXD = PAD_ID(PB, 30),
+ MX1_PAD_UART2_RXD = PAD_ID(PB, 31),
+ MX1_PAD_SSI_RXFS = PAD_ID(PC, 3),
+ MX1_PAD_SSI_RXCLK = PAD_ID(PC, 4),
+ MX1_PAD_SSI_RXDAT = PAD_ID(PC, 5),
+ MX1_PAD_SSI_TXDAT = PAD_ID(PC, 6),
+ MX1_PAD_SSI_TXFS = PAD_ID(PC, 7),
+ MX1_PAD_SSI_TXCLK = PAD_ID(PC, 8),
+ MX1_PAD_UART1_CTS = PAD_ID(PC, 9),
+ MX1_PAD_UART1_RTS = PAD_ID(PC, 10),
+ MX1_PAD_UART1_TXD = PAD_ID(PC, 11),
+ MX1_PAD_UART1_RXD = PAD_ID(PC, 12),
+ MX1_PAD_SPI1_RDY = PAD_ID(PC, 13),
+ MX1_PAD_SPI1_SCLK = PAD_ID(PC, 14),
+ MX1_PAD_SPI1_SS = PAD_ID(PC, 15),
+ MX1_PAD_SPI1_MISO = PAD_ID(PC, 16),
+ MX1_PAD_SPI1_MOSI = PAD_ID(PC, 17),
+ MX1_PAD_BT13 = PAD_ID(PC, 19),
+ MX1_PAD_BT12 = PAD_ID(PC, 20),
+ MX1_PAD_BT11 = PAD_ID(PC, 21),
+ MX1_PAD_BT10 = PAD_ID(PC, 22),
+ MX1_PAD_BT9 = PAD_ID(PC, 23),
+ MX1_PAD_BT8 = PAD_ID(PC, 24),
+ MX1_PAD_BT7 = PAD_ID(PC, 25),
+ MX1_PAD_BT6 = PAD_ID(PC, 26),
+ MX1_PAD_BT5 = PAD_ID(PC, 27),
+ MX1_PAD_BT4 = PAD_ID(PC, 28),
+ MX1_PAD_BT3 = PAD_ID(PC, 29),
+ MX1_PAD_BT2 = PAD_ID(PC, 30),
+ MX1_PAD_BT1 = PAD_ID(PC, 31),
+ MX1_PAD_LSCLK = PAD_ID(PD, 6),
+ MX1_PAD_REV = PAD_ID(PD, 7),
+ MX1_PAD_CLS = PAD_ID(PD, 8),
+ MX1_PAD_PS = PAD_ID(PD, 9),
+ MX1_PAD_SPL_SPR = PAD_ID(PD, 10),
+ MX1_PAD_CONTRAST = PAD_ID(PD, 11),
+ MX1_PAD_ACD_OE = PAD_ID(PD, 12),
+ MX1_PAD_LP_HSYNC = PAD_ID(PD, 13),
+ MX1_PAD_FLM_VSYNC = PAD_ID(PD, 14),
+ MX1_PAD_LD0 = PAD_ID(PD, 15),
+ MX1_PAD_LD1 = PAD_ID(PD, 16),
+ MX1_PAD_LD2 = PAD_ID(PD, 17),
+ MX1_PAD_LD3 = PAD_ID(PD, 18),
+ MX1_PAD_LD4 = PAD_ID(PD, 19),
+ MX1_PAD_LD5 = PAD_ID(PD, 20),
+ MX1_PAD_LD6 = PAD_ID(PD, 21),
+ MX1_PAD_LD7 = PAD_ID(PD, 22),
+ MX1_PAD_LD8 = PAD_ID(PD, 23),
+ MX1_PAD_LD9 = PAD_ID(PD, 24),
+ MX1_PAD_LD10 = PAD_ID(PD, 25),
+ MX1_PAD_LD11 = PAD_ID(PD, 26),
+ MX1_PAD_LD12 = PAD_ID(PD, 27),
+ MX1_PAD_LD13 = PAD_ID(PD, 28),
+ MX1_PAD_LD14 = PAD_ID(PD, 29),
+ MX1_PAD_LD15 = PAD_ID(PD, 30),
+ MX1_PAD_TMR2OUT = PAD_ID(PD, 31),
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imx1_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(MX1_PAD_A24),
+ IMX_PINCTRL_PIN(MX1_PAD_TIN),
+ IMX_PINCTRL_PIN(MX1_PAD_PWMO),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_MCLK),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_D0),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_D1),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_D2),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_D3),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_D4),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_D5),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_D6),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_D7),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_VSYNC),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_HSYNC),
+ IMX_PINCTRL_PIN(MX1_PAD_CSI_PIXCLK),
+ IMX_PINCTRL_PIN(MX1_PAD_I2C_SDA),
+ IMX_PINCTRL_PIN(MX1_PAD_I2C_SCL),
+ IMX_PINCTRL_PIN(MX1_PAD_DTACK),
+ IMX_PINCTRL_PIN(MX1_PAD_BCLK),
+ IMX_PINCTRL_PIN(MX1_PAD_LBA),
+ IMX_PINCTRL_PIN(MX1_PAD_ECB),
+ IMX_PINCTRL_PIN(MX1_PAD_A0),
+ IMX_PINCTRL_PIN(MX1_PAD_CS4),
+ IMX_PINCTRL_PIN(MX1_PAD_CS5),
+ IMX_PINCTRL_PIN(MX1_PAD_A16),
+ IMX_PINCTRL_PIN(MX1_PAD_A17),
+ IMX_PINCTRL_PIN(MX1_PAD_A18),
+ IMX_PINCTRL_PIN(MX1_PAD_A19),
+ IMX_PINCTRL_PIN(MX1_PAD_A20),
+ IMX_PINCTRL_PIN(MX1_PAD_A21),
+ IMX_PINCTRL_PIN(MX1_PAD_A22),
+ IMX_PINCTRL_PIN(MX1_PAD_A23),
+ IMX_PINCTRL_PIN(MX1_PAD_SD_DAT0),
+ IMX_PINCTRL_PIN(MX1_PAD_SD_DAT1),
+ IMX_PINCTRL_PIN(MX1_PAD_SD_DAT2),
+ IMX_PINCTRL_PIN(MX1_PAD_SD_DAT3),
+ IMX_PINCTRL_PIN(MX1_PAD_SD_SCLK),
+ IMX_PINCTRL_PIN(MX1_PAD_SD_CMD),
+ IMX_PINCTRL_PIN(MX1_PAD_SIM_SVEN),
+ IMX_PINCTRL_PIN(MX1_PAD_SIM_PD),
+ IMX_PINCTRL_PIN(MX1_PAD_SIM_TX),
+ IMX_PINCTRL_PIN(MX1_PAD_SIM_RX),
+ IMX_PINCTRL_PIN(MX1_PAD_SIM_CLK),
+ IMX_PINCTRL_PIN(MX1_PAD_USBD_AFE),
+ IMX_PINCTRL_PIN(MX1_PAD_USBD_OE),
+ IMX_PINCTRL_PIN(MX1_PAD_USBD_RCV),
+ IMX_PINCTRL_PIN(MX1_PAD_USBD_SUSPND),
+ IMX_PINCTRL_PIN(MX1_PAD_USBD_VP),
+ IMX_PINCTRL_PIN(MX1_PAD_USBD_VM),
+ IMX_PINCTRL_PIN(MX1_PAD_USBD_VPO),
+ IMX_PINCTRL_PIN(MX1_PAD_USBD_VMO),
+ IMX_PINCTRL_PIN(MX1_PAD_UART2_CTS),
+ IMX_PINCTRL_PIN(MX1_PAD_UART2_RTS),
+ IMX_PINCTRL_PIN(MX1_PAD_UART2_TXD),
+ IMX_PINCTRL_PIN(MX1_PAD_UART2_RXD),
+ IMX_PINCTRL_PIN(MX1_PAD_SSI_RXFS),
+ IMX_PINCTRL_PIN(MX1_PAD_SSI_RXCLK),
+ IMX_PINCTRL_PIN(MX1_PAD_SSI_RXDAT),
+ IMX_PINCTRL_PIN(MX1_PAD_SSI_TXDAT),
+ IMX_PINCTRL_PIN(MX1_PAD_SSI_TXFS),
+ IMX_PINCTRL_PIN(MX1_PAD_SSI_TXCLK),
+ IMX_PINCTRL_PIN(MX1_PAD_UART1_CTS),
+ IMX_PINCTRL_PIN(MX1_PAD_UART1_RTS),
+ IMX_PINCTRL_PIN(MX1_PAD_UART1_TXD),
+ IMX_PINCTRL_PIN(MX1_PAD_UART1_RXD),
+ IMX_PINCTRL_PIN(MX1_PAD_SPI1_RDY),
+ IMX_PINCTRL_PIN(MX1_PAD_SPI1_SCLK),
+ IMX_PINCTRL_PIN(MX1_PAD_SPI1_SS),
+ IMX_PINCTRL_PIN(MX1_PAD_SPI1_MISO),
+ IMX_PINCTRL_PIN(MX1_PAD_SPI1_MOSI),
+ IMX_PINCTRL_PIN(MX1_PAD_BT13),
+ IMX_PINCTRL_PIN(MX1_PAD_BT12),
+ IMX_PINCTRL_PIN(MX1_PAD_BT11),
+ IMX_PINCTRL_PIN(MX1_PAD_BT10),
+ IMX_PINCTRL_PIN(MX1_PAD_BT9),
+ IMX_PINCTRL_PIN(MX1_PAD_BT8),
+ IMX_PINCTRL_PIN(MX1_PAD_BT7),
+ IMX_PINCTRL_PIN(MX1_PAD_BT6),
+ IMX_PINCTRL_PIN(MX1_PAD_BT5),
+ IMX_PINCTRL_PIN(MX1_PAD_BT4),
+ IMX_PINCTRL_PIN(MX1_PAD_BT3),
+ IMX_PINCTRL_PIN(MX1_PAD_BT2),
+ IMX_PINCTRL_PIN(MX1_PAD_BT1),
+ IMX_PINCTRL_PIN(MX1_PAD_LSCLK),
+ IMX_PINCTRL_PIN(MX1_PAD_REV),
+ IMX_PINCTRL_PIN(MX1_PAD_CLS),
+ IMX_PINCTRL_PIN(MX1_PAD_PS),
+ IMX_PINCTRL_PIN(MX1_PAD_SPL_SPR),
+ IMX_PINCTRL_PIN(MX1_PAD_CONTRAST),
+ IMX_PINCTRL_PIN(MX1_PAD_ACD_OE),
+ IMX_PINCTRL_PIN(MX1_PAD_LP_HSYNC),
+ IMX_PINCTRL_PIN(MX1_PAD_FLM_VSYNC),
+ IMX_PINCTRL_PIN(MX1_PAD_LD0),
+ IMX_PINCTRL_PIN(MX1_PAD_LD1),
+ IMX_PINCTRL_PIN(MX1_PAD_LD2),
+ IMX_PINCTRL_PIN(MX1_PAD_LD3),
+ IMX_PINCTRL_PIN(MX1_PAD_LD4),
+ IMX_PINCTRL_PIN(MX1_PAD_LD5),
+ IMX_PINCTRL_PIN(MX1_PAD_LD6),
+ IMX_PINCTRL_PIN(MX1_PAD_LD7),
+ IMX_PINCTRL_PIN(MX1_PAD_LD8),
+ IMX_PINCTRL_PIN(MX1_PAD_LD9),
+ IMX_PINCTRL_PIN(MX1_PAD_LD10),
+ IMX_PINCTRL_PIN(MX1_PAD_LD11),
+ IMX_PINCTRL_PIN(MX1_PAD_LD12),
+ IMX_PINCTRL_PIN(MX1_PAD_LD13),
+ IMX_PINCTRL_PIN(MX1_PAD_LD14),
+ IMX_PINCTRL_PIN(MX1_PAD_LD15),
+ IMX_PINCTRL_PIN(MX1_PAD_TMR2OUT),
+};
+
+static struct imx1_pinctrl_soc_info imx1_pinctrl_info = {
+ .pins = imx1_pinctrl_pads,
+ .npins = ARRAY_SIZE(imx1_pinctrl_pads),
+};
+
+static int __init imx1_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx1_pinctrl_core_probe(pdev, &imx1_pinctrl_info);
+}
+
+static const struct of_device_id imx1_pinctrl_of_match[] = {
+ { .compatible = "fsl,imx1-iomuxc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx1_pinctrl_of_match);
+
+static struct platform_driver imx1_pinctrl_driver = {
+ .driver = {
+ .name = "imx1-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = imx1_pinctrl_of_match,
+ },
+ .remove = imx1_pinctrl_core_remove,
+};
+module_platform_driver_probe(imx1_pinctrl_driver, imx1_pinctrl_probe);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("Freescale i.MX1 pinctrl driver");
+MODULE_LICENSE("GPL");
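
The new i.MX1 driver numbers its pads with PAD_ID(port, pin) = port * 32 + pin, so for example MX1_PAD_SD_DAT0 on port B pin 8 becomes pad 40. A small standalone check of that encoding; the decode arithmetic at the end is illustrative and not part of the driver:

#include <stdio.h>

#define PAD_ID(port, pin)	((port) * 32 + (pin))
#define PA 0
#define PB 1
#define PC 2
#define PD 3

int main(void)
{
	/* A few pads from the enum above, recomputed from port/pin. */
	printf("MX1_PAD_A24      = %d\n", PAD_ID(PA, 0));   /* 0   */
	printf("MX1_PAD_SD_DAT0  = %d\n", PAD_ID(PB, 8));   /* 40  */
	printf("MX1_PAD_SSI_RXFS = %d\n", PAD_ID(PC, 3));   /* 67  */
	printf("MX1_PAD_TMR2OUT  = %d\n", PAD_ID(PD, 31));  /* 127 */

	/* Decoding works the other way round: port = pad / 32, pin = pad % 32. */
	int pad = PAD_ID(PB, 8);
	printf("pad %d -> port %d, pin %d\n", pad, pad / 32, pad % 32);

	return 0;
}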
diff --git a/drivers/pinctrl/pinctrl-imx27.c b/drivers/pinctrl/pinctrl-imx27.c
index 417c99205bc2..f8dfefb69968 100644
--- a/drivers/pinctrl/pinctrl-imx27.c
+++ b/drivers/pinctrl/pinctrl-imx27.c
@@ -63,10 +63,6 @@ enum imx27_pads {
MX27_PAD_CONTRAST = PAD_ID(PA, 30),
MX27_PAD_OE_ACD = PAD_ID(PA, 31),
- MX27_PAD_UNUSED0 = PAD_ID(PB, 0),
- MX27_PAD_UNUSED1 = PAD_ID(PB, 1),
- MX27_PAD_UNUSED2 = PAD_ID(PB, 2),
- MX27_PAD_UNUSED3 = PAD_ID(PB, 3),
MX27_PAD_SD2_D0 = PAD_ID(PB, 4),
MX27_PAD_SD2_D1 = PAD_ID(PB, 5),
MX27_PAD_SD2_D2 = PAD_ID(PB, 6),
@@ -96,11 +92,6 @@ enum imx27_pads {
MX27_PAD_USBH1_RXDM = PAD_ID(PB, 30),
MX27_PAD_USBH1_RXDP = PAD_ID(PB, 31),
- MX27_PAD_UNUSED4 = PAD_ID(PC, 0),
- MX27_PAD_UNUSED5 = PAD_ID(PC, 1),
- MX27_PAD_UNUSED6 = PAD_ID(PC, 2),
- MX27_PAD_UNUSED7 = PAD_ID(PC, 3),
- MX27_PAD_UNUSED8 = PAD_ID(PC, 4),
MX27_PAD_I2C2_SDA = PAD_ID(PC, 5),
MX27_PAD_I2C2_SCL = PAD_ID(PC, 6),
MX27_PAD_USBOTG_DATA5 = PAD_ID(PC, 7),
@@ -188,12 +179,6 @@ enum imx27_pads {
MX27_PAD_SD1_CLK = PAD_ID(PE, 23),
MX27_PAD_USBOTG_CLK = PAD_ID(PE, 24),
MX27_PAD_USBOTG_DATA7 = PAD_ID(PE, 25),
- MX27_PAD_UNUSED9 = PAD_ID(PE, 26),
- MX27_PAD_UNUSED10 = PAD_ID(PE, 27),
- MX27_PAD_UNUSED11 = PAD_ID(PE, 28),
- MX27_PAD_UNUSED12 = PAD_ID(PE, 29),
- MX27_PAD_UNUSED13 = PAD_ID(PE, 30),
- MX27_PAD_UNUSED14 = PAD_ID(PE, 31),
MX27_PAD_NFRB = PAD_ID(PF, 0),
MX27_PAD_NFCLE = PAD_ID(PF, 1),
@@ -219,14 +204,6 @@ enum imx27_pads {
MX27_PAD_CS4_B = PAD_ID(PF, 21),
MX27_PAD_CS5_B = PAD_ID(PF, 22),
MX27_PAD_ATA_DATA15 = PAD_ID(PF, 23),
- MX27_PAD_UNUSED15 = PAD_ID(PF, 24),
- MX27_PAD_UNUSED16 = PAD_ID(PF, 25),
- MX27_PAD_UNUSED17 = PAD_ID(PF, 26),
- MX27_PAD_UNUSED18 = PAD_ID(PF, 27),
- MX27_PAD_UNUSED19 = PAD_ID(PF, 28),
- MX27_PAD_UNUSED20 = PAD_ID(PF, 29),
- MX27_PAD_UNUSED21 = PAD_ID(PF, 30),
- MX27_PAD_UNUSED22 = PAD_ID(PF, 31),
};
/* Pad names for the pinmux subsystem */
@@ -264,10 +241,6 @@ static const struct pinctrl_pin_desc imx27_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX27_PAD_CONTRAST),
IMX_PINCTRL_PIN(MX27_PAD_OE_ACD),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED0),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED1),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED2),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED3),
IMX_PINCTRL_PIN(MX27_PAD_SD2_D0),
IMX_PINCTRL_PIN(MX27_PAD_SD2_D1),
IMX_PINCTRL_PIN(MX27_PAD_SD2_D2),
@@ -297,11 +270,6 @@ static const struct pinctrl_pin_desc imx27_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX27_PAD_USBH1_RXDM),
IMX_PINCTRL_PIN(MX27_PAD_USBH1_RXDP),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED4),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED5),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED6),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED7),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED8),
IMX_PINCTRL_PIN(MX27_PAD_I2C2_SDA),
IMX_PINCTRL_PIN(MX27_PAD_I2C2_SCL),
IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA5),
@@ -389,12 +357,6 @@ static const struct pinctrl_pin_desc imx27_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX27_PAD_SD1_CLK),
IMX_PINCTRL_PIN(MX27_PAD_USBOTG_CLK),
IMX_PINCTRL_PIN(MX27_PAD_USBOTG_DATA7),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED9),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED10),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED11),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED12),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED13),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED14),
IMX_PINCTRL_PIN(MX27_PAD_NFRB),
IMX_PINCTRL_PIN(MX27_PAD_NFCLE),
@@ -420,14 +382,6 @@ static const struct pinctrl_pin_desc imx27_pinctrl_pads[] = {
IMX_PINCTRL_PIN(MX27_PAD_CS4_B),
IMX_PINCTRL_PIN(MX27_PAD_CS5_B),
IMX_PINCTRL_PIN(MX27_PAD_ATA_DATA15),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED15),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED16),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED17),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED18),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED19),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED20),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED21),
- IMX_PINCTRL_PIN(MX27_PAD_UNUSED22),
};
static struct imx1_pinctrl_soc_info imx27_pinctrl_info = {
@@ -440,12 +394,6 @@ static struct of_device_id imx27_pinctrl_of_match[] = {
{ /* sentinel */ }
};
-struct imx27_pinctrl_private {
- int num_gpio_childs;
- struct platform_device **gpio_dev;
- struct mxc_gpio_platform_data *gpio_pdata;
-};
-
static int imx27_pinctrl_probe(struct platform_device *pdev)
{
return imx1_pinctrl_core_probe(pdev, &imx27_pinctrl_info);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index bb805d5e9ff0..5e8b2e04cd7a 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -62,11 +62,26 @@ enum rockchip_pinctrl_type {
RK2928,
RK3066B,
RK3188,
+ RK3288,
};
-enum rockchip_pin_bank_type {
- COMMON_BANK,
- RK3188_BANK0,
+/**
+ * Encode variants of iomux registers into a type variable
+ */
+#define IOMUX_GPIO_ONLY BIT(0)
+#define IOMUX_WIDTH_4BIT BIT(1)
+#define IOMUX_SOURCE_PMU BIT(2)
+#define IOMUX_UNROUTED BIT(3)
+
+/**
+ * @type: iomux variant using IOMUX_* constants
+ * @offset: if initialized to -1 it will be autocalculated; specifying an
+ * initial offset value instead resets the relevant source offset, and the
+ * following iomux registers are autocalculated from that new value.
+ */
+struct rockchip_iomux {
+ int type;
+ int offset;
};
/**
@@ -78,6 +93,7 @@ enum rockchip_pin_bank_type {
* @nr_pins: number of pins in this bank
* @name: name of the bank
* @bank_num: number of the bank, to account for holes
+ * @iomux: array describing the 4 iomux sources of the bank
* @valid: is all necessary information present
* @of_node: dt node of this bank
* @drvdata: common pinctrl basedata
@@ -95,7 +111,7 @@ struct rockchip_pin_bank {
u8 nr_pins;
char *name;
u8 bank_num;
- enum rockchip_pin_bank_type bank_type;
+ struct rockchip_iomux iomux[4];
bool valid;
struct device_node *of_node;
struct rockchip_pinctrl *drvdata;
@@ -111,6 +127,25 @@ struct rockchip_pin_bank {
.bank_num = id, \
.nr_pins = pins, \
.name = label, \
+ .iomux = { \
+ { .offset = -1 }, \
+ { .offset = -1 }, \
+ { .offset = -1 }, \
+ { .offset = -1 }, \
+ }, \
+ }
+
+#define PIN_BANK_IOMUX_FLAGS(id, pins, label, iom0, iom1, iom2, iom3) \
+ { \
+ .bank_num = id, \
+ .nr_pins = pins, \
+ .name = label, \
+ .iomux = { \
+ { .type = iom0, .offset = -1 }, \
+ { .type = iom1, .offset = -1 }, \
+ { .type = iom2, .offset = -1 }, \
+ { .type = iom3, .offset = -1 }, \
+ }, \
}
/**
@@ -121,7 +156,8 @@ struct rockchip_pin_ctrl {
u32 nr_pins;
char *label;
enum rockchip_pinctrl_type type;
- int mux_offset;
+ int grf_mux_offset;
+ int pmu_mux_offset;
void (*pull_calc_reg)(struct rockchip_pin_bank *bank,
int pin_num, struct regmap **regmap,
int *reg, u8 *bit);
@@ -343,24 +379,42 @@ static const struct pinctrl_ops rockchip_pctrl_ops = {
static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
{
struct rockchip_pinctrl *info = bank->drvdata;
+ int iomux_num = (pin / 8);
+ struct regmap *regmap;
unsigned int val;
- int reg, ret;
+ int reg, ret, mask;
u8 bit;
- if (bank->bank_type == RK3188_BANK0 && pin < 16)
+ if (iomux_num > 3)
+ return -EINVAL;
+
+ if (bank->iomux[iomux_num].type & IOMUX_UNROUTED) {
+ dev_err(info->dev, "pin %d is unrouted\n", pin);
+ return -EINVAL;
+ }
+
+ if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY)
return RK_FUNC_GPIO;
+ regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
+ ? info->regmap_pmu : info->regmap_base;
+
/* get basic quadruple of mux registers and the correct reg inside */
- reg = info->ctrl->mux_offset;
- reg += bank->bank_num * 0x10;
- reg += (pin / 8) * 4;
- bit = (pin % 8) * 2;
+ mask = (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) ? 0xf : 0x3;
+ reg = bank->iomux[iomux_num].offset;
+ if (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) {
+ if ((pin % 8) >= 4)
+ reg += 0x4;
+ bit = (pin % 4) * 4;
+ } else {
+ bit = (pin % 8) * 2;
+ }
- ret = regmap_read(info->regmap_base, reg, &val);
+ ret = regmap_read(regmap, reg, &val);
if (ret)
return ret;
- return ((val >> bit) & 3);
+ return ((val >> bit) & mask);
}
/*
@@ -379,16 +433,22 @@ static int rockchip_get_mux(struct rockchip_pin_bank *bank, int pin)
static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
{
struct rockchip_pinctrl *info = bank->drvdata;
- int reg, ret;
+ int iomux_num = (pin / 8);
+ struct regmap *regmap;
+ int reg, ret, mask;
unsigned long flags;
u8 bit;
u32 data;
- /*
- * The first 16 pins of rk3188_bank0 are always gpios and do not have
- * a mux register at all.
- */
- if (bank->bank_type == RK3188_BANK0 && pin < 16) {
+ if (iomux_num > 3)
+ return -EINVAL;
+
+ if (bank->iomux[iomux_num].type & IOMUX_UNROUTED) {
+ dev_err(info->dev, "pin %d is unrouted\n", pin);
+ return -EINVAL;
+ }
+
+ if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY) {
if (mux != RK_FUNC_GPIO) {
dev_err(info->dev,
"pin %d only supports a gpio mux\n", pin);
@@ -401,17 +461,25 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
dev_dbg(info->dev, "setting mux of GPIO%d-%d to %d\n",
bank->bank_num, pin, mux);
+ regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
+ ? info->regmap_pmu : info->regmap_base;
+
/* get basic quadruple of mux registers and the correct reg inside */
- reg = info->ctrl->mux_offset;
- reg += bank->bank_num * 0x10;
- reg += (pin / 8) * 4;
- bit = (pin % 8) * 2;
+ mask = (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) ? 0xf : 0x3;
+ reg = bank->iomux[iomux_num].offset;
+ if (bank->iomux[iomux_num].type & IOMUX_WIDTH_4BIT) {
+ if ((pin % 8) >= 4)
+ reg += 0x4;
+ bit = (pin % 4) * 4;
+ } else {
+ bit = (pin % 8) * 2;
+ }
spin_lock_irqsave(&bank->slock, flags);
- data = (3 << (bit + 16));
- data |= (mux & 3) << bit;
- ret = regmap_write(info->regmap_base, reg, data);
+ data = (mask << (bit + 16));
+ data |= (mux & mask) << bit;
+ ret = regmap_write(regmap, reg, data);
spin_unlock_irqrestore(&bank->slock, flags);
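
The rewritten rockchip_get_mux()/rockchip_set_mux() no longer assume 2 bits per pin at a fixed GRF offset: the per-iomux type selects the regmap (GRF or PMU), the field width (2-bit versus 4-bit, where 4-bit iomuxes occupy two registers per 8 pins), and the precomputed per-iomux register offset. A runnable sketch of just the offset/bit/mask arithmetic, mirroring the code above and the hiword-mask write value it builds; the pin numbers are sample inputs and no hardware access is involved:

#include <stdio.h>
#include <stdint.h>

#define IOMUX_WIDTH_4BIT (1 << 1)   /* same flag value as above */

/* Compute register offset, bit position and field mask for a pin. */
static void mux_location(int pin, int type, int iomux_offset,
			 int *reg, int *bit, uint32_t *mask)
{
	*mask = (type & IOMUX_WIDTH_4BIT) ? 0xf : 0x3;
	*reg = iomux_offset;
	if (type & IOMUX_WIDTH_4BIT) {
		if ((pin % 8) >= 4)
			*reg += 0x4;	/* upper 4 pins use the next register */
		*bit = (pin % 4) * 4;
	} else {
		*bit = (pin % 8) * 2;
	}
}

int main(void)
{
	int reg, bit;
	uint32_t mask;

	/* 2-bit iomux, pin 13 of its bank, iomux offset 0x60 */
	mux_location(13, 0, 0x60, &reg, &bit, &mask);
	printf("2-bit: reg 0x%x bit %d mask 0x%x\n", reg, bit, mask);

	/* 4-bit iomux, pin 13, same offset: lands in the second register */
	mux_location(13, IOMUX_WIDTH_4BIT, 0x60, &reg, &bit, &mask);
	printf("4-bit: reg 0x%x bit %d mask 0x%x\n", reg, bit, mask);

	/* The hiword-mask write: the upper 16 bits enable the bits being changed. */
	int mux = 2;
	uint32_t data = (mask << (bit + 16)) | ((uint32_t)(mux & mask) << bit);
	printf("write value 0x%08x\n", data);

	return 0;
}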
@@ -449,7 +517,7 @@ static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
struct rockchip_pinctrl *info = bank->drvdata;
/* The first 12 pins of the first bank are located elsewhere */
- if (bank->bank_type == RK3188_BANK0 && pin_num < 12) {
+ if (bank->bank_num == 0 && pin_num < 12) {
*regmap = info->regmap_pmu ? info->regmap_pmu
: bank->regmap_pull;
*reg = info->regmap_pmu ? RK3188_PULL_PMU_OFFSET : 0;
@@ -476,6 +544,127 @@ static void rk3188_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
}
}
+#define RK3288_PULL_OFFSET 0x140
+static void rk3288_calc_pull_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 24 pins of the first bank are located in PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK3188_PULL_PMU_OFFSET;
+
+ *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3188_PULL_PINS_PER_REG;
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK3288_PULL_OFFSET;
+
+ /* correct the offset, as we're starting with the 2nd bank */
+ *reg -= 0x10;
+ *reg += bank->bank_num * RK3188_PULL_BANK_STRIDE;
+ *reg += ((pin_num / RK3188_PULL_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3188_PULL_PINS_PER_REG);
+ *bit *= RK3188_PULL_BITS_PER_PIN;
+ }
+}
+
+#define RK3288_DRV_PMU_OFFSET 0x70
+#define RK3288_DRV_GRF_OFFSET 0x1c0
+#define RK3288_DRV_BITS_PER_PIN 2
+#define RK3288_DRV_PINS_PER_REG 8
+#define RK3288_DRV_BANK_STRIDE 16
+static int rk3288_drv_list[] = { 2, 4, 8, 12 };
+
+static void rk3288_calc_drv_reg_and_bit(struct rockchip_pin_bank *bank,
+ int pin_num, struct regmap **regmap,
+ int *reg, u8 *bit)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+
+ /* The first 24 pins of the first bank are located in PMU */
+ if (bank->bank_num == 0) {
+ *regmap = info->regmap_pmu;
+ *reg = RK3288_DRV_PMU_OFFSET;
+
+ *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4);
+ *bit = pin_num % RK3288_DRV_PINS_PER_REG;
+ *bit *= RK3288_DRV_BITS_PER_PIN;
+ } else {
+ *regmap = info->regmap_base;
+ *reg = RK3288_DRV_GRF_OFFSET;
+
+ /* correct the offset, as we're starting with the 2nd bank */
+ *reg -= 0x10;
+ *reg += bank->bank_num * RK3288_DRV_BANK_STRIDE;
+ *reg += ((pin_num / RK3288_DRV_PINS_PER_REG) * 4);
+
+ *bit = (pin_num % RK3288_DRV_PINS_PER_REG);
+ *bit *= RK3288_DRV_BITS_PER_PIN;
+ }
+}
+
+static int rk3288_get_drive(struct rockchip_pin_bank *bank, int pin_num)
+{
+ struct regmap *regmap;
+ int reg, ret;
+ u32 data;
+ u8 bit;
+
+ rk3288_calc_drv_reg_and_bit(bank, pin_num, &regmap, &reg, &bit);
+
+ ret = regmap_read(regmap, reg, &data);
+ if (ret)
+ return ret;
+
+ data >>= bit;
+ data &= (1 << RK3288_DRV_BITS_PER_PIN) - 1;
+
+ return rk3288_drv_list[data];
+}
+
+static int rk3288_set_drive(struct rockchip_pin_bank *bank, int pin_num,
+ int strength)
+{
+ struct rockchip_pinctrl *info = bank->drvdata;
+ struct regmap *regmap;
+ unsigned long flags;
+ int reg, ret, i;
+ u32 data;
+ u8 bit;
+
+ rk3288_calc_drv_reg_and_bit(bank, pin_num, &regmap, &reg, &bit);
+
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(rk3288_drv_list); i++) {
+ if (rk3288_drv_list[i] == strength) {
+ ret = i;
+ break;
+ }
+ }
+
+ if (ret < 0) {
+ dev_err(info->dev, "unsupported driver strength %d\n",
+ strength);
+ return ret;
+ }
+
+ spin_lock_irqsave(&bank->slock, flags);
+
+ /* enable the write to the equivalent lower bits */
+ data = ((1 << RK3288_DRV_BITS_PER_PIN) - 1) << (bit + 16);
+ data |= (ret << bit);
+
+ ret = regmap_write(regmap, reg, data);
+ spin_unlock_irqrestore(&bank->slock, flags);
+
+ return ret;
+}
+
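
rk3288_set_drive() maps the requested strength in mA onto a 2-bit field index via rk3288_drv_list[] = {2, 4, 8, 12} and then locates the field with rk3288_calc_drv_reg_and_bit(). A standalone sketch of that lookup and of the GRF-side offset math for banks other than bank 0, reusing the constants from the hunk above; the inputs are samples and the PMU path for bank 0 is left out for brevity:

#include <stdio.h>

#define RK3288_DRV_GRF_OFFSET	0x1c0
#define RK3288_DRV_BITS_PER_PIN	2
#define RK3288_DRV_PINS_PER_REG	8
#define RK3288_DRV_BANK_STRIDE	16

static const int rk3288_drv_list[] = { 2, 4, 8, 12 };

/* Translate a drive strength in mA into the 2-bit register field value. */
static int strength_to_field(int strength)
{
	for (int i = 0; i < 4; i++)
		if (rk3288_drv_list[i] == strength)
			return i;
	return -1;	/* unsupported strength */
}

/* GRF register offset and bit for a pin in bank >= 1 (bank 0 lives in PMU). */
static void drv_location(int bank, int pin, int *reg, int *bit)
{
	*reg = RK3288_DRV_GRF_OFFSET;
	*reg -= 0x10;	/* the GRF block starts with the 2nd bank */
	*reg += bank * RK3288_DRV_BANK_STRIDE;
	*reg += (pin / RK3288_DRV_PINS_PER_REG) * 4;
	*bit = (pin % RK3288_DRV_PINS_PER_REG) * RK3288_DRV_BITS_PER_PIN;
}

int main(void)
{
	int reg, bit;

	drv_location(3, 20, &reg, &bit);	/* bank 3, pin 20 */
	printf("field value for 8mA: %d\n", strength_to_field(8));
	printf("bank 3 pin 20 -> reg 0x%x, bit %d\n", reg, bit);
	return 0;
}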
static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
{
struct rockchip_pinctrl *info = bank->drvdata;
@@ -501,6 +690,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
? PIN_CONFIG_BIAS_PULL_PIN_DEFAULT
: PIN_CONFIG_BIAS_DISABLE;
case RK3188:
+ case RK3288:
data >>= bit;
data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
@@ -555,6 +745,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
spin_unlock_irqrestore(&bank->slock, flags);
break;
case RK3188:
+ case RK3288:
spin_lock_irqsave(&bank->slock, flags);
/* enable the write to the equivalent lower bits */
@@ -657,23 +848,6 @@ static int rockchip_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-static void rockchip_pmx_disable(struct pinctrl_dev *pctldev,
- unsigned selector, unsigned group)
-{
- struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
- const unsigned int *pins = info->groups[group].pins;
- struct rockchip_pin_bank *bank;
- int cnt;
-
- dev_dbg(info->dev, "disable function %s group %s\n",
- info->functions[selector].name, info->groups[group].name);
-
- for (cnt = 0; cnt < info->groups[group].npins; cnt++) {
- bank = pin_to_bank(info, pins[cnt]);
- rockchip_set_mux(bank, pins[cnt] - bank->pin_base, 0);
- }
-}
-
/*
* The calls to gpio_direction_output() and gpio_direction_input()
* lead to this function call (via the pinctrl_gpio_direction_{input|output}()
@@ -716,7 +890,6 @@ static const struct pinmux_ops rockchip_pmx_ops = {
.get_function_name = rockchip_pmx_get_func_name,
.get_function_groups = rockchip_pmx_get_groups,
.enable = rockchip_pmx_enable,
- .disable = rockchip_pmx_disable,
.gpio_set_direction = rockchip_pmx_gpio_set_direction,
};
@@ -734,6 +907,7 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
case RK3066B:
return pull ? false : true;
case RK3188:
+ case RK3288:
return (pull != PIN_CONFIG_BIAS_PULL_PIN_DEFAULT);
}
@@ -788,6 +962,15 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (rc)
return rc;
break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ /* rk3288 is the first with per-pin drive-strength */
+ if (info->ctrl->type != RK3288)
+ return -ENOTSUPP;
+
+ rc = rk3288_set_drive(bank, pin - bank->pin_base, arg);
+ if (rc < 0)
+ return rc;
+ break;
default:
return -ENOTSUPP;
break;
@@ -837,6 +1020,17 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
arg = rc ? 1 : 0;
break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ /* rk3288 is the first with per-pin drive-strength */
+ if (info->ctrl->type != RK3288)
+ return -ENOTSUPP;
+
+ rc = rk3288_get_drive(bank, pin - bank->pin_base);
+ if (rc < 0)
+ return rc;
+
+ arg = rc;
+ break;
default:
return -ENOTSUPP;
break;
@@ -850,6 +1044,7 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
static const struct pinconf_ops rockchip_pinconf_ops = {
.pin_config_get = rockchip_pinconf_get,
.pin_config_set = rockchip_pinconf_set,
+ .is_generic = true,
};
static const struct of_device_id rockchip_bank_match[] = {
@@ -1414,10 +1609,7 @@ fail:
for (--i, --bank; i >= 0; --i, --bank) {
if (!bank->valid)
continue;
-
- if (gpiochip_remove(&bank->gpio_chip))
- dev_err(&pdev->dev, "gpio chip %s remove failed\n",
- bank->gpio_chip.label);
+ gpiochip_remove(&bank->gpio_chip);
}
return ret;
}
@@ -1427,20 +1619,15 @@ static int rockchip_gpiolib_unregister(struct platform_device *pdev,
{
struct rockchip_pin_ctrl *ctrl = info->ctrl;
struct rockchip_pin_bank *bank = ctrl->pin_banks;
- int ret = 0;
int i;
- for (i = 0; !ret && i < ctrl->nr_banks; ++i, ++bank) {
+ for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
if (!bank->valid)
continue;
-
- ret = gpiochip_remove(&bank->gpio_chip);
+ gpiochip_remove(&bank->gpio_chip);
}
- if (ret)
- dev_err(&pdev->dev, "gpio chip remove failed\n");
-
- return ret;
+ return 0;
}
static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
@@ -1466,8 +1653,6 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
"rockchip,rk3188-gpio-bank0")) {
struct device_node *node;
- bank->bank_type = RK3188_BANK0;
-
node = of_parse_phandle(bank->of_node->parent,
"rockchip,pmu", 0);
if (!node) {
@@ -1487,9 +1672,6 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
base,
&rockchip_regmap_config);
}
-
- } else {
- bank->bank_type = COMMON_BANK;
}
bank->irq = irq_of_parse_and_map(bank->of_node, 0);
@@ -1513,7 +1695,7 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
struct device_node *np;
struct rockchip_pin_ctrl *ctrl;
struct rockchip_pin_bank *bank;
- int i;
+ int grf_offs, pmu_offs, i, j;
match = of_match_node(rockchip_pinctrl_dt_match, node);
ctrl = (struct rockchip_pin_ctrl *)match->data;
@@ -1535,12 +1717,51 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
}
}
+ grf_offs = ctrl->grf_mux_offset;
+ pmu_offs = ctrl->pmu_mux_offset;
bank = ctrl->pin_banks;
for (i = 0; i < ctrl->nr_banks; ++i, ++bank) {
+ int bank_pins = 0;
+
spin_lock_init(&bank->slock);
bank->drvdata = d;
bank->pin_base = ctrl->nr_pins;
ctrl->nr_pins += bank->nr_pins;
+
+ /* calculate iomux offsets */
+ for (j = 0; j < 4; j++) {
+ struct rockchip_iomux *iom = &bank->iomux[j];
+ int inc;
+
+ if (bank_pins >= bank->nr_pins)
+ break;
+
+ /* preset offset value, set new start value */
+ if (iom->offset >= 0) {
+ if (iom->type & IOMUX_SOURCE_PMU)
+ pmu_offs = iom->offset;
+ else
+ grf_offs = iom->offset;
+ } else { /* set current offset */
+ iom->offset = (iom->type & IOMUX_SOURCE_PMU) ?
+ pmu_offs : grf_offs;
+ }
+
+ dev_dbg(d->dev, "bank %d, iomux %d has offset 0x%x\n",
+ i, j, iom->offset);
+
+ /*
+ * Increase offset according to iomux width.
+ * 4-bit iomuxes are spread over two registers.
+ */
+ inc = (iom->type & IOMUX_WIDTH_4BIT) ? 8 : 4;
+ if (iom->type & IOMUX_SOURCE_PMU)
+ pmu_offs += inc;
+ else
+ grf_offs += inc;
+
+ bank_pins += 8;
+ }
}
return ctrl;
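
The probe-time loop above walks the four iomux slots of every bank and assigns register offsets: a slot whose offset was preset in the bank table resets the running GRF or PMU offset, otherwise the slot takes the current running value, and the running value then advances by 4 (2-bit iomux, one register) or 8 (4-bit iomux, two registers). A compact userspace rerun of that bookkeeping for a made-up two-bank layout; the bank contents and start offsets below are illustrative, not the rk3288 table:

#include <stdio.h>

#define IOMUX_WIDTH_4BIT (1 << 1)
#define IOMUX_SOURCE_PMU (1 << 2)

struct iomux { int type; int offset; };

int main(void)
{
	/* Two sample banks of 32 pins, four iomux slots each, offsets unset. */
	struct iomux banks[2][4] = {
		{ { IOMUX_SOURCE_PMU, -1 }, { IOMUX_SOURCE_PMU, -1 },
		  { 0, -1 }, { 0, -1 } },
		{ { 0, -1 }, { IOMUX_WIDTH_4BIT, -1 }, { 0, -1 }, { 0, -1 } },
	};
	int grf_offs = 0x0, pmu_offs = 0x84;	/* example start offsets */

	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 4; j++) {
			struct iomux *iom = &banks[i][j];
			int inc;

			if (iom->offset >= 0) {
				/* a preset offset resets the running value */
				if (iom->type & IOMUX_SOURCE_PMU)
					pmu_offs = iom->offset;
				else
					grf_offs = iom->offset;
			} else {
				iom->offset = (iom->type & IOMUX_SOURCE_PMU) ?
					      pmu_offs : grf_offs;
			}

			printf("bank %d iomux %d -> 0x%x\n", i, j, iom->offset);

			/* 4-bit iomuxes consume two registers, 2-bit ones consume one */
			inc = (iom->type & IOMUX_WIDTH_4BIT) ? 8 : 4;
			if (iom->type & IOMUX_SOURCE_PMU)
				pmu_offs += inc;
			else
				grf_offs += inc;
		}
	}
	return 0;
}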
@@ -1644,7 +1865,7 @@ static struct rockchip_pin_ctrl rk2928_pin_ctrl = {
.nr_banks = ARRAY_SIZE(rk2928_pin_banks),
.label = "RK2928-GPIO",
.type = RK2928,
- .mux_offset = 0xa8,
+ .grf_mux_offset = 0xa8,
.pull_calc_reg = rk2928_calc_pull_reg_and_bit,
};
@@ -1662,7 +1883,7 @@ static struct rockchip_pin_ctrl rk3066a_pin_ctrl = {
.nr_banks = ARRAY_SIZE(rk3066a_pin_banks),
.label = "RK3066a-GPIO",
.type = RK2928,
- .mux_offset = 0xa8,
+ .grf_mux_offset = 0xa8,
.pull_calc_reg = rk2928_calc_pull_reg_and_bit,
};
@@ -1678,11 +1899,11 @@ static struct rockchip_pin_ctrl rk3066b_pin_ctrl = {
.nr_banks = ARRAY_SIZE(rk3066b_pin_banks),
.label = "RK3066b-GPIO",
.type = RK3066B,
- .mux_offset = 0x60,
+ .grf_mux_offset = 0x60,
};
static struct rockchip_pin_bank rk3188_pin_banks[] = {
- PIN_BANK(0, 32, "gpio0"),
+ PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_GPIO_ONLY, 0, 0, 0),
PIN_BANK(1, 32, "gpio1"),
PIN_BANK(2, 32, "gpio2"),
PIN_BANK(3, 32, "gpio3"),
@@ -1693,10 +1914,52 @@ static struct rockchip_pin_ctrl rk3188_pin_ctrl = {
.nr_banks = ARRAY_SIZE(rk3188_pin_banks),
.label = "RK3188-GPIO",
.type = RK3188,
- .mux_offset = 0x60,
+ .grf_mux_offset = 0x60,
.pull_calc_reg = rk3188_calc_pull_reg_and_bit,
};
+static struct rockchip_pin_bank rk3288_pin_banks[] = {
+ PIN_BANK_IOMUX_FLAGS(0, 24, "gpio0", IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_SOURCE_PMU,
+ IOMUX_UNROUTED
+ ),
+ PIN_BANK_IOMUX_FLAGS(1, 32, "gpio1", IOMUX_UNROUTED,
+ IOMUX_UNROUTED,
+ IOMUX_UNROUTED,
+ 0
+ ),
+ PIN_BANK_IOMUX_FLAGS(2, 32, "gpio2", 0, 0, 0, IOMUX_UNROUTED),
+ PIN_BANK_IOMUX_FLAGS(3, 32, "gpio3", 0, 0, 0, IOMUX_WIDTH_4BIT),
+ PIN_BANK_IOMUX_FLAGS(4, 32, "gpio4", IOMUX_WIDTH_4BIT,
+ IOMUX_WIDTH_4BIT,
+ 0,
+ 0
+ ),
+ PIN_BANK_IOMUX_FLAGS(5, 32, "gpio5", IOMUX_UNROUTED,
+ 0,
+ 0,
+ IOMUX_UNROUTED
+ ),
+ PIN_BANK_IOMUX_FLAGS(6, 32, "gpio6", 0, 0, 0, IOMUX_UNROUTED),
+ PIN_BANK_IOMUX_FLAGS(7, 32, "gpio7", 0,
+ 0,
+ IOMUX_WIDTH_4BIT,
+ IOMUX_UNROUTED
+ ),
+ PIN_BANK(8, 16, "gpio8"),
+};
+
+static struct rockchip_pin_ctrl rk3288_pin_ctrl = {
+ .pin_banks = rk3288_pin_banks,
+ .nr_banks = ARRAY_SIZE(rk3288_pin_banks),
+ .label = "RK3288-GPIO",
+ .type = RK3288,
+ .grf_mux_offset = 0x0,
+ .pmu_mux_offset = 0x84,
+ .pull_calc_reg = rk3288_calc_pull_reg_and_bit,
+};
+
static const struct of_device_id rockchip_pinctrl_dt_match[] = {
{ .compatible = "rockchip,rk2928-pinctrl",
.data = (void *)&rk2928_pin_ctrl },
@@ -1706,6 +1969,8 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
.data = (void *)&rk3066b_pin_ctrl },
{ .compatible = "rockchip,rk3188-pinctrl",
.data = (void *)&rk3188_pin_ctrl },
+ { .compatible = "rockchip,rk3288-pinctrl",
+ .data = (void *)&rk3288_pin_ctrl },
{},
};
MODULE_DEVICE_TABLE(of, rockchip_pinctrl_dt_match);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 2960557bfed9..95dd9cf55cb3 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -488,61 +488,6 @@ static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector,
return 0;
}
-static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,
- unsigned group)
-{
- struct pcs_device *pcs;
- struct pcs_function *func;
- int i;
-
- pcs = pinctrl_dev_get_drvdata(pctldev);
- /* If function mask is null, needn't disable it. */
- if (!pcs->fmask)
- return;
-
- func = radix_tree_lookup(&pcs->ftree, fselector);
- if (!func) {
- dev_err(pcs->dev, "%s could not find function%i\n",
- __func__, fselector);
- return;
- }
-
- /*
- * Ignore disable if function-off is not specified. Some hardware
- * does not have clearly defined disable function. For pin specific
- * off modes, you can use alternate named states as described in
- * pinctrl-bindings.txt.
- */
- if (pcs->foff == PCS_OFF_DISABLED) {
- dev_dbg(pcs->dev, "ignoring disable for %s function%i\n",
- func->name, fselector);
- return;
- }
-
- dev_dbg(pcs->dev, "disabling function%i %s\n",
- fselector, func->name);
-
- for (i = 0; i < func->nvals; i++) {
- struct pcs_func_vals *vals;
- unsigned long flags;
- unsigned val, mask;
-
- vals = &func->vals[i];
- raw_spin_lock_irqsave(&pcs->lock, flags);
- val = pcs->read(vals->reg);
-
- if (pcs->bits_per_mux)
- mask = vals->mask;
- else
- mask = pcs->fmask;
-
- val &= ~mask;
- val |= pcs->foff << pcs->fshift;
- pcs->write(val, vals->reg);
- raw_spin_unlock_irqrestore(&pcs->lock, flags);
- }
-}
-
static int pcs_request_gpio(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range, unsigned pin)
{
@@ -575,7 +520,6 @@ static const struct pinmux_ops pcs_pinmux_ops = {
.get_function_name = pcs_get_function_name,
.get_function_groups = pcs_get_function_groups,
.enable = pcs_enable,
- .disable = pcs_disable,
.gpio_request_enable = pcs_request_gpio,
};
@@ -836,7 +780,7 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
pin = &pcs->pins.pa[i];
pn = &pcs->names[i];
- sprintf(pn->name, "%lx.%d",
+ sprintf(pn->name, "%lx.%u",
(unsigned long)pcs->res->start + offset, pin_pos);
pin->name = pn->name;
pin->number = i;
@@ -1739,11 +1683,10 @@ static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc)
{
struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
struct irq_chip *chip;
- int res;
chip = irq_get_chip(irq);
chained_irq_enter(chip, desc);
- res = pcs_irq_handle(pcs_soc);
+ pcs_irq_handle(pcs_soc);
/* REVISIT: export and add handle_bad_irq(irq, desc)? */
chained_irq_exit(chip, desc);
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 9f43916637ca..5475374d803f 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -930,11 +930,6 @@ static int st_pmx_enable(struct pinctrl_dev *pctldev, unsigned fselector,
return 0;
}
-static void st_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
-{
-}
-
static int st_pmx_set_gpio_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range, unsigned gpio,
bool input)
@@ -957,7 +952,6 @@ static struct pinmux_ops st_pmxops = {
.get_function_name = st_pmx_get_fname,
.get_function_groups = st_pmx_get_groups,
.enable = st_pmx_enable,
- .disable = st_pmx_disable,
.gpio_set_direction = st_pmx_set_gpio_direction,
};
@@ -1178,9 +1172,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
const __be32 *list;
struct property *pp;
struct st_pinconf *conf;
- phandle phandle;
struct device_node *pins;
- u32 pin;
int i = 0, npins = 0, nr_props;
pins = of_get_child_by_name(np, "st,pins");
@@ -1218,8 +1210,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
conf = &grp->pin_conf[i];
/* bank & offset */
- phandle = be32_to_cpup(list++);
- pin = be32_to_cpup(list++);
+ be32_to_cpup(list++);
+ be32_to_cpup(list++);
conf->pin = of_get_named_gpio(pins, pp->name, 0);
conf->name = pp->name;
grp->pins[i] = conf->pin;
@@ -1256,7 +1248,7 @@ static int st_pctl_parse_functions(struct device_node *np,
func = &info->functions[index];
func->name = np->name;
func->ngroups = of_get_child_count(np);
- if (func->ngroups <= 0) {
+ if (func->ngroups == 0) {
dev_err(info->dev, "No groups defined\n");
return -EINVAL;
}
@@ -1454,6 +1446,7 @@ static struct irq_chip st_gpio_irqchip = {
.irq_mask = st_gpio_irq_mask,
.irq_unmask = st_gpio_irq_unmask,
.irq_set_type = st_gpio_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
static int st_gpiolib_register_bank(struct st_pinctrl *info,
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index 26ca6855f478..71c5d4f0c538 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -738,22 +738,6 @@ static int tb10x_pctl_enable(struct pinctrl_dev *pctl,
return 0;
}
-static void tb10x_pctl_disable(struct pinctrl_dev *pctl,
- unsigned func_selector, unsigned group_selector)
-{
- struct tb10x_pinctrl *state = pinctrl_dev_get_drvdata(pctl);
- const struct tb10x_pinfuncgrp *grp = &state->pingroups[group_selector];
-
- if (grp->port < 0)
- return;
-
- mutex_lock(&state->mutex);
-
- state->ports[grp->port].count--;
-
- mutex_unlock(&state->mutex);
-}
-
static struct pinmux_ops tb10x_pinmux_ops = {
.get_functions_count = tb10x_get_functions_count,
.get_function_name = tb10x_get_function_name,
@@ -761,7 +745,6 @@ static struct pinmux_ops tb10x_pinmux_ops = {
.gpio_request_enable = tb10x_gpio_request_enable,
.gpio_disable_free = tb10x_gpio_disable_free,
.enable = tb10x_pctl_enable,
- .disable = tb10x_pctl_disable,
};
static struct pinctrl_desc tb10x_pindesc = {
diff --git a/drivers/pinctrl/pinctrl-tegra-xusb.c b/drivers/pinctrl/pinctrl-tegra-xusb.c
new file mode 100644
index 000000000000..a06620474845
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-tegra-xusb.c
@@ -0,0 +1,973 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <dt-bindings/pinctrl/pinctrl-tegra-xusb.h>
+
+#include "core.h"
+#include "pinctrl-utils.h"
+
+#define XUSB_PADCTL_ELPG_PROGRAM 0x01c
+#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN (1 << 26)
+#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY (1 << 25)
+#define XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN (1 << 24)
+
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1 0x040
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET (1 << 19)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK (0xf << 12)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST (1 << 1)
+
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2 0x044
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN (1 << 6)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN (1 << 5)
+#define XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL (1 << 4)
+
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1 0x138
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET (1 << 27)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE (1 << 24)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD (1 << 3)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST (1 << 1)
+#define XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ (1 << 0)
+
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1 0x148
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD (1 << 1)
+#define XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ (1 << 0)
+
+struct tegra_xusb_padctl_function {
+ const char *name;
+ const char * const *groups;
+ unsigned int num_groups;
+};
+
+struct tegra_xusb_padctl_group {
+ const unsigned int *funcs;
+ unsigned int num_funcs;
+};
+
+struct tegra_xusb_padctl_soc {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int num_pins;
+
+ const struct tegra_xusb_padctl_function *functions;
+ unsigned int num_functions;
+
+ const struct tegra_xusb_padctl_lane *lanes;
+ unsigned int num_lanes;
+};
+
+struct tegra_xusb_padctl_lane {
+ const char *name;
+
+ unsigned int offset;
+ unsigned int shift;
+ unsigned int mask;
+ unsigned int iddq;
+
+ const unsigned int *funcs;
+ unsigned int num_funcs;
+};
+
+struct tegra_xusb_padctl {
+ struct device *dev;
+ void __iomem *regs;
+ struct mutex lock;
+ struct reset_control *rst;
+
+ const struct tegra_xusb_padctl_soc *soc;
+ struct pinctrl_dev *pinctrl;
+ struct pinctrl_desc desc;
+
+ struct phy_provider *provider;
+ struct phy *phys[2];
+
+ unsigned int enable;
+};
+
+static inline void padctl_writel(struct tegra_xusb_padctl *padctl, u32 value,
+ unsigned long offset)
+{
+ writel(value, padctl->regs + offset);
+}
+
+static inline u32 padctl_readl(struct tegra_xusb_padctl *padctl,
+ unsigned long offset)
+{
+ return readl(padctl->regs + offset);
+}
+
+static int tegra_xusb_padctl_get_groups_count(struct pinctrl_dev *pinctrl)
+{
+ struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
+
+ return padctl->soc->num_pins;
+}
+
+static const char *tegra_xusb_padctl_get_group_name(struct pinctrl_dev *pinctrl,
+ unsigned int group)
+{
+ struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
+
+ return padctl->soc->pins[group].name;
+}
+
+enum tegra_xusb_padctl_param {
+ TEGRA_XUSB_PADCTL_IDDQ,
+};
+
+static const struct tegra_xusb_padctl_property {
+ const char *name;
+ enum tegra_xusb_padctl_param param;
+} properties[] = {
+ { "nvidia,iddq", TEGRA_XUSB_PADCTL_IDDQ },
+};
+
+#define TEGRA_XUSB_PADCTL_PACK(param, value) ((param) << 16 | (value))
+#define TEGRA_XUSB_PADCTL_UNPACK_PARAM(config) ((config) >> 16)
+#define TEGRA_XUSB_PADCTL_UNPACK_VALUE(config) ((config) & 0xffff)
+
+static int tegra_xusb_padctl_parse_subnode(struct tegra_xusb_padctl *padctl,
+ struct device_node *np,
+ struct pinctrl_map **maps,
+ unsigned int *reserved_maps,
+ unsigned int *num_maps)
+{
+ unsigned int i, reserve = 0, num_configs = 0;
+ unsigned long config, *configs = NULL;
+ const char *function, *group;
+ struct property *prop;
+ int err = 0;
+ u32 value;
+
+ err = of_property_read_string(np, "nvidia,function", &function);
+ if (err < 0) {
+ if (err != -EINVAL)
+ return err;
+
+ function = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(properties); i++) {
+ err = of_property_read_u32(np, properties[i].name, &value);
+ if (err < 0) {
+ if (err == -EINVAL)
+ continue;
+
+ return err;
+ }
+
+ config = TEGRA_XUSB_PADCTL_PACK(properties[i].param, value);
+
+ err = pinctrl_utils_add_config(padctl->pinctrl, &configs,
+ &num_configs, config);
+ if (err < 0)
+ return err;
+ }
+
+ if (function)
+ reserve++;
+
+ if (num_configs)
+ reserve++;
+
+ err = of_property_count_strings(np, "nvidia,lanes");
+ if (err < 0)
+ return err;
+
+ reserve *= err;
+
+ err = pinctrl_utils_reserve_map(padctl->pinctrl, maps, reserved_maps,
+ num_maps, reserve);
+ if (err < 0)
+ return err;
+
+ of_property_for_each_string(np, "nvidia,lanes", prop, group) {
+ if (function) {
+ err = pinctrl_utils_add_map_mux(padctl->pinctrl, maps,
+ reserved_maps, num_maps, group,
+ function);
+ if (err < 0)
+ return err;
+ }
+
+ if (num_configs) {
+ err = pinctrl_utils_add_map_configs(padctl->pinctrl,
+ maps, reserved_maps, num_maps, group,
+ configs, num_configs,
+ PIN_MAP_TYPE_CONFIGS_GROUP);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ return 0;
+}
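
tegra_xusb_padctl_parse_subnode() reserves pinctrl map entries up front: each lane listed in nvidia,lanes needs one entry if a function was given and one more if any config properties were found, so the reserve count is multiplied by the number of lanes. A tiny rerun of that arithmetic; the helper name and the sample node shape are illustrative:

#include <stdio.h>

/* Number of pinctrl map entries to reserve for one DT subnode. */
static unsigned int maps_to_reserve(int has_function, unsigned int num_configs,
				    unsigned int num_lanes)
{
	unsigned int per_lane = 0;

	if (has_function)
		per_lane++;	/* one MUX_GROUP map per lane */
	if (num_configs)
		per_lane++;	/* one CONFIGS_GROUP map per lane */

	return per_lane * num_lanes;
}

int main(void)
{
	/* e.g. a node with nvidia,function, one config property and three lanes */
	printf("reserve %u map entries\n", maps_to_reserve(1, 1, 3));
	return 0;
}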
+
+static int tegra_xusb_padctl_dt_node_to_map(struct pinctrl_dev *pinctrl,
+ struct device_node *parent,
+ struct pinctrl_map **maps,
+ unsigned int *num_maps)
+{
+ struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
+ unsigned int reserved_maps = 0;
+ struct device_node *np;
+ int err;
+
+ *num_maps = 0;
+ *maps = NULL;
+
+ for_each_child_of_node(parent, np) {
+ err = tegra_xusb_padctl_parse_subnode(padctl, np, maps,
+ &reserved_maps,
+ num_maps);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct pinctrl_ops tegra_xusb_padctl_pinctrl_ops = {
+ .get_groups_count = tegra_xusb_padctl_get_groups_count,
+ .get_group_name = tegra_xusb_padctl_get_group_name,
+ .dt_node_to_map = tegra_xusb_padctl_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int tegra_xusb_padctl_get_functions_count(struct pinctrl_dev *pinctrl)
+{
+ struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
+
+ return padctl->soc->num_functions;
+}
+
+static const char *
+tegra_xusb_padctl_get_function_name(struct pinctrl_dev *pinctrl,
+ unsigned int function)
+{
+ struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
+
+ return padctl->soc->functions[function].name;
+}
+
+static int tegra_xusb_padctl_get_function_groups(struct pinctrl_dev *pinctrl,
+ unsigned int function,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
+
+ *num_groups = padctl->soc->functions[function].num_groups;
+ *groups = padctl->soc->functions[function].groups;
+
+ return 0;
+}
+
+static int tegra_xusb_padctl_pinmux_enable(struct pinctrl_dev *pinctrl,
+ unsigned int function,
+ unsigned int group)
+{
+ struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
+ const struct tegra_xusb_padctl_lane *lane;
+ unsigned int i;
+ u32 value;
+
+ lane = &padctl->soc->lanes[group];
+
+ for (i = 0; i < lane->num_funcs; i++)
+ if (lane->funcs[i] == function)
+ break;
+
+ if (i >= lane->num_funcs)
+ return -EINVAL;
+
+ value = padctl_readl(padctl, lane->offset);
+ value &= ~(lane->mask << lane->shift);
+ value |= i << lane->shift;
+ padctl_writel(padctl, value, lane->offset);
+
+ return 0;
+}
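
tegra_xusb_padctl_pinmux_enable() looks up the requested function in the lane's function list and writes its index into the lane's mux field with an ordinary read-modify-write (unlike the Rockchip hiword-mask registers, there is no write-enable half here). A tiny standalone illustration of that field update; the register value, shift and mask are sample numbers, not taken from any TRM:

#include <stdio.h>
#include <stdint.h>

/* Clear a bitfield of the given mask/shift and insert a new index. */
static uint32_t set_lane_mux(uint32_t value, unsigned int shift, uint32_t mask,
			     uint32_t index)
{
	value &= ~(mask << shift);
	value |= index << shift;
	return value;
}

int main(void)
{
	uint32_t reg = 0xffffffff;		/* sample register contents */

	/* 2-bit wide mux field at bit 16, select function index 1 */
	reg = set_lane_mux(reg, 16, 0x3, 1);
	printf("new value 0x%08x\n", reg);	/* -> 0xfffdffff */
	return 0;
}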
+
+static const struct pinmux_ops tegra_xusb_padctl_pinmux_ops = {
+ .get_functions_count = tegra_xusb_padctl_get_functions_count,
+ .get_function_name = tegra_xusb_padctl_get_function_name,
+ .get_function_groups = tegra_xusb_padctl_get_function_groups,
+ .enable = tegra_xusb_padctl_pinmux_enable,
+};
+
+static int tegra_xusb_padctl_pinconf_group_get(struct pinctrl_dev *pinctrl,
+ unsigned int group,
+ unsigned long *config)
+{
+ struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
+ const struct tegra_xusb_padctl_lane *lane;
+ enum tegra_xusb_padctl_param param;
+ u32 value;
+
+ param = TEGRA_XUSB_PADCTL_UNPACK_PARAM(*config);
+ lane = &padctl->soc->lanes[group];
+
+ switch (param) {
+ case TEGRA_XUSB_PADCTL_IDDQ:
+ /* lanes with iddq == 0 don't support this parameter */
+ if (lane->iddq == 0)
+ return -EINVAL;
+
+ value = padctl_readl(padctl, lane->offset);
+
+ if (value & BIT(lane->iddq))
+ value = 0;
+ else
+ value = 1;
+
+ *config = TEGRA_XUSB_PADCTL_PACK(param, value);
+ break;
+
+ default:
+ dev_err(padctl->dev, "invalid configuration parameter: %04x\n",
+ param);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tegra_xusb_padctl_pinconf_group_set(struct pinctrl_dev *pinctrl,
+ unsigned int group,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct tegra_xusb_padctl *padctl = pinctrl_dev_get_drvdata(pinctrl);
+ const struct tegra_xusb_padctl_lane *lane;
+ enum tegra_xusb_padctl_param param;
+ unsigned long value;
+ unsigned int i;
+ u32 regval;
+
+ lane = &padctl->soc->lanes[group];
+
+ for (i = 0; i < num_configs; i++) {
+ param = TEGRA_XUSB_PADCTL_UNPACK_PARAM(configs[i]);
+ value = TEGRA_XUSB_PADCTL_UNPACK_VALUE(configs[i]);
+
+ switch (param) {
+ case TEGRA_XUSB_PADCTL_IDDQ:
+ /* lanes with iddq == 0 don't support this parameter */
+ if (lane->iddq == 0)
+ return -EINVAL;
+
+ regval = padctl_readl(padctl, lane->offset);
+
+ if (value)
+ regval &= ~BIT(lane->iddq);
+ else
+ regval |= BIT(lane->iddq);
+
+ padctl_writel(padctl, regval, lane->offset);
+ break;
+
+ default:
+ dev_err(padctl->dev,
+ "invalid configuration parameter: %04x\n",
+ param);
+ return -ENOTSUPP;
+ }
+ }
+
+ return 0;
+}
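
The padctl pinconf code packs a parameter id and its value into one unsigned long: the parameter lives above bit 16 and the value in the low 16 bits, exactly as the TEGRA_XUSB_PADCTL_PACK/UNPACK macros above define it. A quick round-trip check of that encoding; the sample value is arbitrary:

#include <stdio.h>

#define TEGRA_XUSB_PADCTL_PACK(param, value)	((param) << 16 | (value))
#define TEGRA_XUSB_PADCTL_UNPACK_PARAM(config)	((config) >> 16)
#define TEGRA_XUSB_PADCTL_UNPACK_VALUE(config)	((config) & 0xffff)

int main(void)
{
	unsigned long config = TEGRA_XUSB_PADCTL_PACK(1, 0x1234);

	printf("param=%lu value=0x%lx\n",
	       (unsigned long)TEGRA_XUSB_PADCTL_UNPACK_PARAM(config),
	       (unsigned long)TEGRA_XUSB_PADCTL_UNPACK_VALUE(config));
	/* prints: param=1 value=0x1234 */
	return 0;
}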
+
+#ifdef CONFIG_DEBUG_FS
+static const char *strip_prefix(const char *s)
+{
+ const char *comma = strchr(s, ',');
+ if (!comma)
+ return s;
+
+ return comma + 1;
+}
+
+static void
+tegra_xusb_padctl_pinconf_group_dbg_show(struct pinctrl_dev *pinctrl,
+ struct seq_file *s,
+ unsigned int group)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(properties); i++) {
+ unsigned long config, value;
+ int err;
+
+ config = TEGRA_XUSB_PADCTL_PACK(properties[i].param, 0);
+
+ err = tegra_xusb_padctl_pinconf_group_get(pinctrl, group,
+ &config);
+ if (err < 0)
+ continue;
+
+ value = TEGRA_XUSB_PADCTL_UNPACK_VALUE(config);
+
+ seq_printf(s, "\n\t%s=%lu\n", strip_prefix(properties[i].name),
+ value);
+ }
+}
+
+static void
+tegra_xusb_padctl_pinconf_config_dbg_show(struct pinctrl_dev *pinctrl,
+ struct seq_file *s,
+ unsigned long config)
+{
+ enum tegra_xusb_padctl_param param;
+ const char *name = "unknown";
+ unsigned long value;
+ unsigned int i;
+
+ param = TEGRA_XUSB_PADCTL_UNPACK_PARAM(config);
+ value = TEGRA_XUSB_PADCTL_UNPACK_VALUE(config);
+
+ for (i = 0; i < ARRAY_SIZE(properties); i++) {
+ if (properties[i].param == param) {
+ name = properties[i].name;
+ break;
+ }
+ }
+
+ seq_printf(s, "%s=%lu", strip_prefix(name), value);
+}
+#endif
+
+static const struct pinconf_ops tegra_xusb_padctl_pinconf_ops = {
+ .pin_config_group_get = tegra_xusb_padctl_pinconf_group_get,
+ .pin_config_group_set = tegra_xusb_padctl_pinconf_group_set,
+#ifdef CONFIG_DEBUG_FS
+ .pin_config_group_dbg_show = tegra_xusb_padctl_pinconf_group_dbg_show,
+ .pin_config_config_dbg_show = tegra_xusb_padctl_pinconf_config_dbg_show,
+#endif
+};
+
+static int tegra_xusb_padctl_enable(struct tegra_xusb_padctl *padctl)
+{
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ if (padctl->enable++ > 0)
+ goto out;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value &= ~XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+out:
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
+static int tegra_xusb_padctl_disable(struct tegra_xusb_padctl *padctl)
+{
+ u32 value;
+
+ mutex_lock(&padctl->lock);
+
+ if (WARN_ON(padctl->enable == 0))
+ goto out;
+
+ if (--padctl->enable > 0)
+ goto out;
+
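+ /*
+ * Reverse of tegra_xusb_padctl_enable(): re-apply the VCORE down
+ * request and the LP0 clamps.
+ */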
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_VCORE_DOWN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN_EARLY;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+ usleep_range(100, 200);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_ELPG_PROGRAM);
+ value |= XUSB_PADCTL_ELPG_PROGRAM_AUX_MUX_LP0_CLAMP_EN;
+ padctl_writel(padctl, value, XUSB_PADCTL_ELPG_PROGRAM);
+
+out:
+ mutex_unlock(&padctl->lock);
+ return 0;
+}
+
+static int tegra_xusb_phy_init(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy);
+
+ return tegra_xusb_padctl_enable(padctl);
+}
+
+static int tegra_xusb_phy_exit(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy);
+
+ return tegra_xusb_padctl_disable(padctl);
+}
+
+static int pcie_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy);
+ unsigned long timeout;
+ int err = -ETIMEDOUT;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_REFCLK_SEL_MASK;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);
+ value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL2_REFCLKBUF_EN |
+ XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_EN |
+ XUSB_PADCTL_IOPHY_PLL_P0_CTL2_TXCLKREF_SEL;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL2);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+ value |= XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+
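+ /* wait up to 50 ms for the PLL to report lock */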
+ timeout = jiffies + msecs_to_jiffies(50);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+ if (value & XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL0_LOCKDET) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(100, 200);
+ }
+
+ return err;
+}
+
+static int pcie_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy);
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_P0_CTL1_PLL_RST;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_P0_CTL1);
+
+ return 0;
+}
+
+static const struct phy_ops pcie_phy_ops = {
+ .init = tegra_xusb_phy_init,
+ .exit = tegra_xusb_phy_exit,
+ .power_on = pcie_phy_power_on,
+ .power_off = pcie_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int sata_phy_power_on(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy);
+ unsigned long timeout;
+ int err = -ETIMEDOUT;
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD;
+ value &= ~XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD;
+ value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ timeout = jiffies + msecs_to_jiffies(50);
+
+ while (time_before(jiffies, timeout)) {
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ if (value & XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_LOCKDET) {
+ err = 0;
+ break;
+ }
+
+ usleep_range(100, 200);
+ }
+
+ return err;
+}
+
+static int sata_phy_power_off(struct phy *phy)
+{
+ struct tegra_xusb_padctl *padctl = phy_get_drvdata(phy);
+ u32 value;
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_RST;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value &= ~XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL1_MODE;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+ value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_PWR_OVRD;
+ value |= XUSB_PADCTL_IOPHY_PLL_S0_CTL1_PLL_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_PLL_S0_CTL1);
+
+ value = padctl_readl(padctl, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+ value |= XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ_OVRD;
+ value |= XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1_IDDQ;
+ padctl_writel(padctl, value, XUSB_PADCTL_IOPHY_MISC_PAD_S0_CTL1);
+
+ return 0;
+}
+
+static const struct phy_ops sata_phy_ops = {
+ .init = tegra_xusb_phy_init,
+ .exit = tegra_xusb_phy_exit,
+ .power_on = sata_phy_power_on,
+ .power_off = sata_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *tegra_xusb_padctl_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct tegra_xusb_padctl *padctl = dev_get_drvdata(dev);
+ unsigned int index = args->args[0];
+
+ if (args->args_count <= 0)
+ return ERR_PTR(-EINVAL);
+
+ if (index >= ARRAY_SIZE(padctl->phys))
+ return ERR_PTR(-EINVAL);
+
+ return padctl->phys[index];
+}
+
+#define PIN_OTG_0 0
+#define PIN_OTG_1 1
+#define PIN_OTG_2 2
+#define PIN_ULPI_0 3
+#define PIN_HSIC_0 4
+#define PIN_HSIC_1 5
+#define PIN_PCIE_0 6
+#define PIN_PCIE_1 7
+#define PIN_PCIE_2 8
+#define PIN_PCIE_3 9
+#define PIN_PCIE_4 10
+#define PIN_SATA_0 11
+
+static const struct pinctrl_pin_desc tegra124_pins[] = {
+ PINCTRL_PIN(PIN_OTG_0, "otg-0"),
+ PINCTRL_PIN(PIN_OTG_1, "otg-1"),
+ PINCTRL_PIN(PIN_OTG_2, "otg-2"),
+ PINCTRL_PIN(PIN_ULPI_0, "ulpi-0"),
+ PINCTRL_PIN(PIN_HSIC_0, "hsic-0"),
+ PINCTRL_PIN(PIN_HSIC_1, "hsic-1"),
+ PINCTRL_PIN(PIN_PCIE_0, "pcie-0"),
+ PINCTRL_PIN(PIN_PCIE_1, "pcie-1"),
+ PINCTRL_PIN(PIN_PCIE_2, "pcie-2"),
+ PINCTRL_PIN(PIN_PCIE_3, "pcie-3"),
+ PINCTRL_PIN(PIN_PCIE_4, "pcie-4"),
+ PINCTRL_PIN(PIN_SATA_0, "sata-0"),
+};
+
+static const char * const tegra124_snps_groups[] = {
+ "otg-0",
+ "otg-1",
+ "otg-2",
+ "ulpi-0",
+ "hsic-0",
+ "hsic-1",
+};
+
+static const char * const tegra124_xusb_groups[] = {
+ "otg-0",
+ "otg-1",
+ "otg-2",
+ "ulpi-0",
+ "hsic-0",
+ "hsic-1",
+};
+
+static const char * const tegra124_uart_groups[] = {
+ "otg-0",
+ "otg-1",
+ "otg-2",
+};
+
+static const char * const tegra124_pcie_groups[] = {
+ "pcie-0",
+ "pcie-1",
+ "pcie-2",
+ "pcie-3",
+ "pcie-4",
+ "sata-0",
+};
+
+static const char * const tegra124_usb3_groups[] = {
+ "pcie-0",
+ "pcie-1",
+ "pcie-2",
+ "pcie-3",
+ "pcie-4",
+ "sata-0",
+};
+
+static const char * const tegra124_sata_groups[] = {
+ "pcie-0",
+ "pcie-1",
+ "pcie-2",
+ "pcie-3",
+ "pcie-4",
+ "sata-0",
+};
+
+static const char * const tegra124_rsvd_groups[] = {
+ "otg-0",
+ "otg-1",
+ "otg-2",
+ "pcie-0",
+ "pcie-1",
+ "pcie-2",
+ "pcie-3",
+ "pcie-4",
+ "sata-0",
+};
+
+#define TEGRA124_FUNCTION(_name) \
+ { \
+ .name = #_name, \
+ .num_groups = ARRAY_SIZE(tegra124_##_name##_groups), \
+ .groups = tegra124_##_name##_groups, \
+ }
+
+static struct tegra_xusb_padctl_function tegra124_functions[] = {
+ TEGRA124_FUNCTION(snps),
+ TEGRA124_FUNCTION(xusb),
+ TEGRA124_FUNCTION(uart),
+ TEGRA124_FUNCTION(pcie),
+ TEGRA124_FUNCTION(usb3),
+ TEGRA124_FUNCTION(sata),
+ TEGRA124_FUNCTION(rsvd),
+};
+
+enum tegra124_function {
+ TEGRA124_FUNC_SNPS,
+ TEGRA124_FUNC_XUSB,
+ TEGRA124_FUNC_UART,
+ TEGRA124_FUNC_PCIE,
+ TEGRA124_FUNC_USB3,
+ TEGRA124_FUNC_SATA,
+ TEGRA124_FUNC_RSVD,
+};
+
+static const unsigned int tegra124_otg_functions[] = {
+ TEGRA124_FUNC_SNPS,
+ TEGRA124_FUNC_XUSB,
+ TEGRA124_FUNC_UART,
+ TEGRA124_FUNC_RSVD,
+};
+
+static const unsigned int tegra124_usb_functions[] = {
+ TEGRA124_FUNC_SNPS,
+ TEGRA124_FUNC_XUSB,
+};
+
+static const unsigned int tegra124_pci_functions[] = {
+ TEGRA124_FUNC_PCIE,
+ TEGRA124_FUNC_USB3,
+ TEGRA124_FUNC_SATA,
+ TEGRA124_FUNC_RSVD,
+};
+
+#define TEGRA124_LANE(_name, _offset, _shift, _mask, _iddq, _funcs) \
+ { \
+ .name = _name, \
+ .offset = _offset, \
+ .shift = _shift, \
+ .mask = _mask, \
+ .iddq = _iddq, \
+ .num_funcs = ARRAY_SIZE(tegra124_##_funcs##_functions), \
+ .funcs = tegra124_##_funcs##_functions, \
+ }
+
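+/*
+ * The iddq value is the bit position of the lane's IDDQ control within the
+ * register at the given offset; 0 means the lane has no IDDQ control (see
+ * tegra_xusb_padctl_pinconf_group_set()).
+ */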
+static const struct tegra_xusb_padctl_lane tegra124_lanes[] = {
+ TEGRA124_LANE("otg-0", 0x004, 0, 0x3, 0, otg),
+ TEGRA124_LANE("otg-1", 0x004, 2, 0x3, 0, otg),
+ TEGRA124_LANE("otg-2", 0x004, 4, 0x3, 0, otg),
+ TEGRA124_LANE("ulpi-0", 0x004, 12, 0x1, 0, usb),
+ TEGRA124_LANE("hsic-0", 0x004, 14, 0x1, 0, usb),
+ TEGRA124_LANE("hsic-1", 0x004, 15, 0x1, 0, usb),
+ TEGRA124_LANE("pcie-0", 0x134, 16, 0x3, 1, pci),
+ TEGRA124_LANE("pcie-1", 0x134, 18, 0x3, 2, pci),
+ TEGRA124_LANE("pcie-2", 0x134, 20, 0x3, 3, pci),
+ TEGRA124_LANE("pcie-3", 0x134, 22, 0x3, 4, pci),
+ TEGRA124_LANE("pcie-4", 0x134, 24, 0x3, 5, pci),
+ TEGRA124_LANE("sata-0", 0x134, 26, 0x3, 6, pci),
+};
+
+static const struct tegra_xusb_padctl_soc tegra124_soc = {
+ .num_pins = ARRAY_SIZE(tegra124_pins),
+ .pins = tegra124_pins,
+ .num_functions = ARRAY_SIZE(tegra124_functions),
+ .functions = tegra124_functions,
+ .num_lanes = ARRAY_SIZE(tegra124_lanes),
+ .lanes = tegra124_lanes,
+};
+
+static const struct of_device_id tegra_xusb_padctl_of_match[] = {
+ { .compatible = "nvidia,tegra124-xusb-padctl", .data = &tegra124_soc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
+
+static int tegra_xusb_padctl_probe(struct platform_device *pdev)
+{
+ struct tegra_xusb_padctl *padctl;
+ const struct of_device_id *match;
+ struct resource *res;
+ struct phy *phy;
+ int err;
+
+ padctl = devm_kzalloc(&pdev->dev, sizeof(*padctl), GFP_KERNEL);
+ if (!padctl)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, padctl);
+ mutex_init(&padctl->lock);
+ padctl->dev = &pdev->dev;
+
+ match = of_match_node(tegra_xusb_padctl_of_match, pdev->dev.of_node);
+ padctl->soc = match->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ padctl->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(padctl->regs))
+ return PTR_ERR(padctl->regs);
+
+ padctl->rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(padctl->rst))
+ return PTR_ERR(padctl->rst);
+
+ err = reset_control_deassert(padctl->rst);
+ if (err < 0)
+ return err;
+
+ memset(&padctl->desc, 0, sizeof(padctl->desc));
+ padctl->desc.name = dev_name(padctl->dev);
+ padctl->desc.pctlops = &tegra_xusb_padctl_pinctrl_ops;
+ padctl->desc.pmxops = &tegra_xusb_padctl_pinmux_ops;
+ padctl->desc.confops = &tegra_xusb_padctl_pinconf_ops;
+ padctl->desc.owner = THIS_MODULE;
+
+ padctl->pinctrl = pinctrl_register(&padctl->desc, &pdev->dev, padctl);
+ if (!padctl->pinctrl) {
+ dev_err(&pdev->dev, "failed to register pincontrol\n");
+ err = -ENODEV;
+ goto reset;
+ }
+
+ phy = devm_phy_create(&pdev->dev, NULL, &pcie_phy_ops, NULL);
+ if (IS_ERR(phy)) {
+ err = PTR_ERR(phy);
+ goto unregister;
+ }
+
+ padctl->phys[TEGRA_XUSB_PADCTL_PCIE] = phy;
+ phy_set_drvdata(phy, padctl);
+
+ phy = devm_phy_create(&pdev->dev, NULL, &sata_phy_ops, NULL);
+ if (IS_ERR(phy)) {
+ err = PTR_ERR(phy);
+ goto unregister;
+ }
+
+ padctl->phys[TEGRA_XUSB_PADCTL_SATA] = phy;
+ phy_set_drvdata(phy, padctl);
+
+ padctl->provider = devm_of_phy_provider_register(&pdev->dev,
+ tegra_xusb_padctl_xlate);
+ if (IS_ERR(padctl->provider)) {
+ err = PTR_ERR(padctl->provider);
+ dev_err(&pdev->dev, "failed to register PHYs: %d\n", err);
+ goto unregister;
+ }
+
+ return 0;
+
+unregister:
+ pinctrl_unregister(padctl->pinctrl);
+reset:
+ reset_control_assert(padctl->rst);
+ return err;
+}
+
+static int tegra_xusb_padctl_remove(struct platform_device *pdev)
+{
+ struct tegra_xusb_padctl *padctl = platform_get_drvdata(pdev);
+ int err;
+
+ pinctrl_unregister(padctl->pinctrl);
+
+ err = reset_control_assert(padctl->rst);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to assert reset: %d\n", err);
+
+ return err;
+}
+
+static struct platform_driver tegra_xusb_padctl_driver = {
+ .driver = {
+ .name = "tegra-xusb-padctl",
+ .of_match_table = tegra_xusb_padctl_of_match,
+ },
+ .probe = tegra_xusb_padctl_probe,
+ .remove = tegra_xusb_padctl_remove,
+};
+module_platform_driver(tegra_xusb_padctl_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("Tegra 124 XUSB Pad Control driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 2d43bff74f59..150af5503c09 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -290,24 +290,11 @@ static int tegra_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
return 0;
}
-static void tegra_pinctrl_disable(struct pinctrl_dev *pctldev,
- unsigned function, unsigned group)
-{
- struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- const struct tegra_pingroup *g;
-
- g = &pmx->soc->groups[group];
-
- if (WARN_ON(g->mux_reg < 0))
- return;
-}
-
static const struct pinmux_ops tegra_pinmux_ops = {
.get_functions_count = tegra_pinctrl_get_funcs_count,
.get_function_name = tegra_pinctrl_get_func_name,
.get_function_groups = tegra_pinctrl_get_func_groups,
.enable = tegra_pinctrl_enable,
- .disable = tegra_pinctrl_disable,
};
static int tegra_pinconf_reg(struct tegra_pmx *pmx,
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
index 5bf01c28925e..41e81a35cabb 100644
--- a/drivers/pinctrl/pinctrl-tz1090-pdc.c
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -574,33 +574,6 @@ static int tz1090_pdc_pinctrl_enable(struct pinctrl_dev *pctldev,
return 0;
}
-static void tz1090_pdc_pinctrl_disable(struct pinctrl_dev *pctldev,
- unsigned int function,
- unsigned int group)
-{
- struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- const struct tz1090_pdc_pingroup *grp = &tz1090_pdc_groups[group];
-
- dev_dbg(pctldev->dev, "%s(func=%u (%s), group=%u (%s))\n",
- __func__,
- function, tz1090_pdc_functions[function].name,
- group, tz1090_pdc_groups[group].name);
-
- /* is it even a mux? */
- if (grp->drv)
- return;
-
- /* does this group even control the function? */
- if (function != grp->func)
- return;
-
- /* record the pin being unmuxed and update mux bit */
- spin_lock(&pmx->lock);
- pmx->mux_en &= ~BIT(grp->pins[0]);
- tz1090_pdc_pinctrl_mux(pmx, grp);
- spin_unlock(&pmx->lock);
-}
-
static const struct tz1090_pdc_pingroup *find_mux_group(
struct tz1090_pdc_pmx *pmx,
unsigned int pin)
@@ -662,7 +635,6 @@ static struct pinmux_ops tz1090_pdc_pinmux_ops = {
.get_function_name = tz1090_pdc_pinctrl_get_func_name,
.get_function_groups = tz1090_pdc_pinctrl_get_func_groups,
.enable = tz1090_pdc_pinctrl_enable,
- .disable = tz1090_pdc_pinctrl_disable,
.gpio_request_enable = tz1090_pdc_pinctrl_gpio_request_enable,
.gpio_disable_free = tz1090_pdc_pinctrl_gpio_disable_free,
};
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
index bc9cd7a7602e..24082216842e 100644
--- a/drivers/pinctrl/pinctrl-tz1090.c
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -1479,63 +1479,6 @@ mux_pins:
}
/**
- * tz1090_pinctrl_disable() - Disable a function on a pin group.
- * @pctldev: Pin control data
- * @function: Function index to disable
- * @group: Group index to disable
- *
- * Disable a particular function on a group of pins. The per GPIO pin pseudo pin
- * groups can be used (in which case the pin will be taken out of peripheral
- * mode. Some convenience pin groups can also be used in which case the effect
- * is the same as enabling the function on each individual pin in the group.
- */
-static void tz1090_pinctrl_disable(struct pinctrl_dev *pctldev,
- unsigned int function, unsigned int group)
-{
- struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- struct tz1090_pingroup *grp;
- unsigned int pin_num, mux_group, i, npins;
- const unsigned int *pins;
-
- /* group of pins? */
- if (group < ARRAY_SIZE(tz1090_groups)) {
- grp = &tz1090_groups[group];
- npins = grp->npins;
- pins = grp->pins;
- /*
- * All pins in the group must belong to the same mux group,
- * which allows us to just use the mux group of the first pin.
- * By explicitly listing permitted pingroups for each function
- * the pinmux core should ensure this is always the case.
- */
- } else {
- pin_num = group - ARRAY_SIZE(tz1090_groups);
- npins = 1;
- pins = &pin_num;
- }
- mux_group = tz1090_mux_pins[*pins];
-
- /* no mux group, but can still be individually muxed to peripheral */
- if (mux_group >= TZ1090_MUX_GROUP_MAX) {
- if (function == TZ1090_MUX_PERIP)
- goto unmux_pins;
- return;
- }
-
- /* mux group already set to a different function? */
- grp = &tz1090_mux_groups[mux_group];
- dev_dbg(pctldev->dev, "%s: unmuxing %u pin(s) in '%s' from '%s'\n",
- __func__, npins, grp->name, tz1090_functions[function].name);
-
- /* subtract pins from ref count and unmux individually */
- WARN_ON(grp->func_count < npins);
- grp->func_count -= npins;
-unmux_pins:
- for (i = 0; i < npins; ++i)
- tz1090_pinctrl_perip_select(pmx, pins[i], false);
-}
-
-/**
* tz1090_pinctrl_gpio_request_enable() - Put pin in GPIO mode.
* @pctldev: Pin control data
* @range: GPIO range
@@ -1575,7 +1518,6 @@ static struct pinmux_ops tz1090_pinmux_ops = {
.get_function_name = tz1090_pinctrl_get_func_name,
.get_function_groups = tz1090_pinctrl_get_func_groups,
.enable = tz1090_pinctrl_enable,
- .disable = tz1090_pinctrl_disable,
.gpio_request_enable = tz1090_pinctrl_gpio_request_enable,
.gpio_disable_free = tz1090_pinctrl_gpio_disable_free,
};
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 209a01b8bd3b..0959bb36450f 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -970,19 +970,6 @@ static int u300_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
return 0;
}
-static void u300_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
-{
- struct u300_pmx *upmx;
-
- /* There is nothing to do with the power pins */
- if (selector == 0)
- return;
-
- upmx = pinctrl_dev_get_drvdata(pctldev);
- u300_pmx_endisable(upmx, selector, false);
-}
-
static int u300_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
{
return ARRAY_SIZE(u300_pmx_functions);
@@ -1008,7 +995,6 @@ static const struct pinmux_ops u300_pmx_ops = {
.get_function_name = u300_pmx_get_func_name,
.get_function_groups = u300_pmx_get_groups,
.enable = u300_pmx_enable,
- .disable = u300_pmx_disable,
};
static int u300_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 051e8592990e..c055daf9a80f 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -471,7 +471,6 @@ void pinmux_disable_setting(struct pinctrl_setting const *setting)
{
struct pinctrl_dev *pctldev = setting->pctldev;
const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
- const struct pinmux_ops *ops = pctldev->desc->pmxops;
int ret = 0;
const unsigned *pins = NULL;
unsigned num_pins = 0;
@@ -518,9 +517,6 @@ void pinmux_disable_setting(struct pinctrl_setting const *setting)
pins[i], desc->name, gname);
}
}
-
- if (ops->disable)
- ops->disable(pctldev, setting->data.mux.func, setting->data.mux.group);
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
new file mode 100644
index 000000000000..d160a710d704
--- /dev/null
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -0,0 +1,42 @@
+if (ARCH_QCOM || COMPILE_TEST)
+
+config PINCTRL_MSM
+ bool
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+
+config PINCTRL_APQ8064
+ tristate "Qualcomm APQ8064 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm APQ8064 platform.
+
+config PINCTRL_IPQ8064
+ tristate "Qualcomm IPQ8064 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm IPQ8064 platform.
+
+config PINCTRL_MSM8960
+ tristate "Qualcomm 8960 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm 8960 platform.
+
+config PINCTRL_MSM8X74
+ tristate "Qualcomm 8x74 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm TLMM block found in the Qualcomm 8974 platform.
+
+endif
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
new file mode 100644
index 000000000000..2a02602d715c
--- /dev/null
+++ b/drivers/pinctrl/qcom/Makefile
@@ -0,0 +1,6 @@
+# Qualcomm pin control drivers
+obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o
+obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o
+obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
+obj-$(CONFIG_PINCTRL_MSM8960) += pinctrl-msm8960.o
+obj-$(CONFIG_PINCTRL_MSM8X74) += pinctrl-msm8x74.o
diff --git a/drivers/pinctrl/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c
index 519f7886b0f1..feb6f152f9b7 100644
--- a/drivers/pinctrl/pinctrl-apq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c
@@ -230,7 +230,7 @@ static const unsigned int sdc3_data_pins[] = { 95 };
.pins = gpio##id##_pins, \
.npins = ARRAY_SIZE(gpio##id##_pins), \
.funcs = (int[]){ \
- APQ_MUX_NA, /* gpio mode */ \
+ APQ_MUX_gpio, \
APQ_MUX_##f1, \
APQ_MUX_##f2, \
APQ_MUX_##f3, \
@@ -293,6 +293,7 @@ enum apq8064_functions {
APQ_MUX_cam_mclk,
APQ_MUX_codec_mic_i2s,
APQ_MUX_codec_spkr_i2s,
+ APQ_MUX_gpio,
APQ_MUX_gsbi1,
APQ_MUX_gsbi2,
APQ_MUX_gsbi3,
@@ -335,6 +336,21 @@ static const char * const codec_mic_i2s_groups[] = {
static const char * const codec_spkr_i2s_groups[] = {
"gpio39", "gpio40", "gpio41", "gpio42"
};
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89"
+};
static const char * const gsbi1_groups[] = {
"gpio18", "gpio19", "gpio20", "gpio21"
};
@@ -430,6 +446,7 @@ static const struct msm_function apq8064_functions[] = {
FUNCTION(cam_mclk),
FUNCTION(codec_mic_i2s),
FUNCTION(codec_spkr_i2s),
+ FUNCTION(gpio),
FUNCTION(gsbi1),
FUNCTION(gsbi2),
FUNCTION(gsbi3),
diff --git a/drivers/pinctrl/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
index acafea4c3a33..767cf1120b20 100644
--- a/drivers/pinctrl/pinctrl-ipq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
@@ -183,7 +183,7 @@ static const unsigned int sdc3_data_pins[] = { 71 };
.pins = gpio##id##_pins, \
.npins = ARRAY_SIZE(gpio##id##_pins), \
.funcs = (int[]){ \
- IPQ_MUX_NA, /* gpio mode */ \
+ IPQ_MUX_gpio, \
IPQ_MUX_##f1, \
IPQ_MUX_##f2, \
IPQ_MUX_##f3, \
@@ -243,6 +243,7 @@ static const unsigned int sdc3_data_pins[] = { 71 };
}
enum ipq8064_functions {
+ IPQ_MUX_gpio,
IPQ_MUX_mdio,
IPQ_MUX_mi2s,
IPQ_MUX_pdm,
@@ -291,6 +292,19 @@ enum ipq8064_functions {
IPQ_MUX_NA,
};
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68"
+};
+
static const char * const mdio_groups[] = {
"gpio0", "gpio1", "gpio10", "gpio11",
};
@@ -481,6 +495,7 @@ static const char * const ps_hold_groups[] = {
};
static const struct msm_function ipq8064_functions[] = {
+ FUNCTION(gpio),
FUNCTION(mdio),
FUNCTION(ssbi),
FUNCTION(spmi),
diff --git a/drivers/pinctrl/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index df6dda4ce803..2738108caff2 100644
--- a/drivers/pinctrl/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -27,10 +27,10 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
-#include "core.h"
-#include "pinconf.h"
+#include "../core.h"
+#include "../pinconf.h"
#include "pinctrl-msm.h"
-#include "pinctrl-utils.h"
+#include "../pinctrl-utils.h"
#define MAX_NR_GPIO 300
@@ -142,9 +142,6 @@ static int msm_pinmux_enable(struct pinctrl_dev *pctldev,
g = &pctrl->soc->groups[group];
- if (WARN_ON(g->mux_bit < 0))
- return -EINVAL;
-
for (i = 0; i < g->nfuncs; i++) {
if (g->funcs[i] == function)
break;
@@ -165,36 +162,11 @@ static int msm_pinmux_enable(struct pinctrl_dev *pctldev,
return 0;
}
-static void msm_pinmux_disable(struct pinctrl_dev *pctldev,
- unsigned function,
- unsigned group)
-{
- struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- const struct msm_pingroup *g;
- unsigned long flags;
- u32 val;
-
- g = &pctrl->soc->groups[group];
-
- if (WARN_ON(g->mux_bit < 0))
- return;
-
- spin_lock_irqsave(&pctrl->lock, flags);
-
- /* Clear the mux bits to select gpio mode */
- val = readl(pctrl->regs + g->ctl_reg);
- val &= ~(0x7 << g->mux_bit);
- writel(val, pctrl->regs + g->ctl_reg);
-
- spin_unlock_irqrestore(&pctrl->lock, flags);
-}
-
static const struct pinmux_ops msm_pinmux_ops = {
.get_functions_count = msm_get_functions_count,
.get_function_name = msm_get_function_name,
.get_function_groups = msm_get_function_groups,
.enable = msm_pinmux_enable,
- .disable = msm_pinmux_disable,
};
static int msm_config_reg(struct msm_pinctrl *pctrl,
@@ -206,6 +178,7 @@ static int msm_config_reg(struct msm_pinctrl *pctrl,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_BUS_HOLD:
case PIN_CONFIG_BIAS_PULL_UP:
*bit = g->pull_bit;
*mask = 3;
@@ -243,6 +216,7 @@ static int msm_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
#define MSM_NO_PULL 0
#define MSM_PULL_DOWN 1
+#define MSM_KEEPER 2
#define MSM_PULL_UP 3
static unsigned msm_regval_to_drive(u32 val)
@@ -280,6 +254,9 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_DOWN:
arg = arg == MSM_PULL_DOWN;
break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ arg = arg == MSM_KEEPER;
+ break;
case PIN_CONFIG_BIAS_PULL_UP:
arg = arg == MSM_PULL_UP;
break;
@@ -339,6 +316,9 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_DOWN:
arg = MSM_PULL_DOWN;
break;
+ case PIN_CONFIG_BIAS_BUS_HOLD:
+ arg = MSM_KEEPER;
+ break;
case PIN_CONFIG_BIAS_PULL_UP:
arg = MSM_PULL_UP;
break;
diff --git a/drivers/pinctrl/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 7b2a227a590a..7b2a227a590a 100644
--- a/drivers/pinctrl/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
new file mode 100644
index 000000000000..35047036a053
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
@@ -0,0 +1,1282 @@
+/*
+ * Copyright (c) 2014, Sony Mobile Communications AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "pinctrl-msm.h"
+
+static const struct pinctrl_pin_desc msm8960_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+
+ PINCTRL_PIN(152, "SDC1_CLK"),
+ PINCTRL_PIN(153, "SDC1_CMD"),
+ PINCTRL_PIN(154, "SDC1_DATA"),
+ PINCTRL_PIN(155, "SDC3_CLK"),
+ PINCTRL_PIN(156, "SDC3_CMD"),
+ PINCTRL_PIN(157, "SDC3_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+
+static const unsigned int sdc1_clk_pins[] = { 152 };
+static const unsigned int sdc1_cmd_pins[] = { 153 };
+static const unsigned int sdc1_data_pins[] = { 154 };
+static const unsigned int sdc3_clk_pins[] = { 155 };
+static const unsigned int sdc3_cmd_pins[] = { 156 };
+static const unsigned int sdc3_data_pins[] = { 157 };
+
+#define FUNCTION(fname) \
+ [MSM_MUX_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
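+/*
+ * Each GPIO has a 0x10-wide register window starting at 0x1000: control at
+ * +0x0, I/O at +0x4, interrupt configuration at +0x8 and interrupt status at
+ * +0xc. The per-GPIO interrupt target registers are packed at a 0x4 stride
+ * starting at 0x400.
+ */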
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ MSM_MUX_gpio, \
+ MSM_MUX_##f1, \
+ MSM_MUX_##f2, \
+ MSM_MUX_##f3, \
+ MSM_MUX_##f4, \
+ MSM_MUX_##f5, \
+ MSM_MUX_##f6, \
+ MSM_MUX_##f7, \
+ MSM_MUX_##f8, \
+ MSM_MUX_##f9, \
+ MSM_MUX_##f10, \
+ MSM_MUX_##f11 \
+ }, \
+ .nfuncs = 12, \
+ .ctl_reg = 0x1000 + 0x10 * id, \
+ .io_reg = 0x1004 + 0x10 * id, \
+ .intr_cfg_reg = 0x1008 + 0x10 * id, \
+ .intr_status_reg = 0x100c + 0x10 * id, \
+ .intr_target_reg = 0x400 + 0x4 * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_ack_high = 1, \
+ .intr_target_bit = 0, \
+ .intr_raw_status_bit = 3, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 1, \
+ }
+
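+/*
+ * The SDC pad groups only provide pull and drive-strength controls; the mux,
+ * output-enable, I/O and interrupt fields are set to -1 to mark them as
+ * absent.
+ */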
+#define SDC_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+enum msm8960_functions {
+ MSM_MUX_audio_pcm,
+ MSM_MUX_bt,
+ MSM_MUX_cam_mclk0,
+ MSM_MUX_cam_mclk1,
+ MSM_MUX_cam_mclk2,
+ MSM_MUX_codec_mic_i2s,
+ MSM_MUX_codec_spkr_i2s,
+ MSM_MUX_ext_gps,
+ MSM_MUX_fm,
+ MSM_MUX_gps_blanking,
+ MSM_MUX_gps_pps_in,
+ MSM_MUX_gps_pps_out,
+ MSM_MUX_gp_clk_0a,
+ MSM_MUX_gp_clk_0b,
+ MSM_MUX_gp_clk_1a,
+ MSM_MUX_gp_clk_1b,
+ MSM_MUX_gp_clk_2a,
+ MSM_MUX_gp_clk_2b,
+ MSM_MUX_gp_mn,
+ MSM_MUX_gp_pdm_0a,
+ MSM_MUX_gp_pdm_0b,
+ MSM_MUX_gp_pdm_1a,
+ MSM_MUX_gp_pdm_1b,
+ MSM_MUX_gp_pdm_2a,
+ MSM_MUX_gp_pdm_2b,
+ MSM_MUX_gpio,
+ MSM_MUX_gsbi1,
+ MSM_MUX_gsbi1_spi_cs1_n,
+ MSM_MUX_gsbi1_spi_cs2a_n,
+ MSM_MUX_gsbi1_spi_cs2b_n,
+ MSM_MUX_gsbi1_spi_cs3_n,
+ MSM_MUX_gsbi2,
+ MSM_MUX_gsbi2_spi_cs1_n,
+ MSM_MUX_gsbi2_spi_cs2_n,
+ MSM_MUX_gsbi2_spi_cs3_n,
+ MSM_MUX_gsbi3,
+ MSM_MUX_gsbi4,
+ MSM_MUX_gsbi4_3d_cam_i2c_l,
+ MSM_MUX_gsbi4_3d_cam_i2c_r,
+ MSM_MUX_gsbi5,
+ MSM_MUX_gsbi5_3d_cam_i2c_l,
+ MSM_MUX_gsbi5_3d_cam_i2c_r,
+ MSM_MUX_gsbi6,
+ MSM_MUX_gsbi7,
+ MSM_MUX_gsbi8,
+ MSM_MUX_gsbi9,
+ MSM_MUX_gsbi10,
+ MSM_MUX_gsbi11,
+ MSM_MUX_gsbi11_spi_cs1a_n,
+ MSM_MUX_gsbi11_spi_cs1b_n,
+ MSM_MUX_gsbi11_spi_cs2a_n,
+ MSM_MUX_gsbi11_spi_cs2b_n,
+ MSM_MUX_gsbi11_spi_cs3_n,
+ MSM_MUX_gsbi12,
+ MSM_MUX_hdmi_cec,
+ MSM_MUX_hdmi_ddc_clock,
+ MSM_MUX_hdmi_ddc_data,
+ MSM_MUX_hdmi_hot_plug_detect,
+ MSM_MUX_hsic,
+ MSM_MUX_mdp_vsync,
+ MSM_MUX_mi2s,
+ MSM_MUX_mic_i2s,
+ MSM_MUX_pmb_clk,
+ MSM_MUX_pmb_ext_ctrl,
+ MSM_MUX_ps_hold,
+ MSM_MUX_rpm_wdog,
+ MSM_MUX_sdc2,
+ MSM_MUX_sdc4,
+ MSM_MUX_sdc5,
+ MSM_MUX_slimbus1,
+ MSM_MUX_slimbus2,
+ MSM_MUX_spkr_i2s,
+ MSM_MUX_ssbi1,
+ MSM_MUX_ssbi2,
+ MSM_MUX_ssbi_ext_gps,
+ MSM_MUX_ssbi_pmic2,
+ MSM_MUX_ssbi_qpa1,
+ MSM_MUX_ssbi_ts,
+ MSM_MUX_tsif1,
+ MSM_MUX_tsif2,
+ MSM_MUX_ts_eoc,
+ MSM_MUX_usb_fs1,
+ MSM_MUX_usb_fs1_oe,
+ MSM_MUX_usb_fs1_oe_n,
+ MSM_MUX_usb_fs2,
+ MSM_MUX_usb_fs2_oe,
+ MSM_MUX_usb_fs2_oe_n,
+ MSM_MUX_vfe_camif_timer1_a,
+ MSM_MUX_vfe_camif_timer1_b,
+ MSM_MUX_vfe_camif_timer2,
+ MSM_MUX_vfe_camif_timer3_a,
+ MSM_MUX_vfe_camif_timer3_b,
+ MSM_MUX_vfe_camif_timer4_a,
+ MSM_MUX_vfe_camif_timer4_b,
+ MSM_MUX_vfe_camif_timer4_c,
+ MSM_MUX_vfe_camif_timer5_a,
+ MSM_MUX_vfe_camif_timer5_b,
+ MSM_MUX_vfe_camif_timer6_a,
+ MSM_MUX_vfe_camif_timer6_b,
+ MSM_MUX_vfe_camif_timer6_c,
+ MSM_MUX_vfe_camif_timer7_a,
+ MSM_MUX_vfe_camif_timer7_b,
+ MSM_MUX_vfe_camif_timer7_c,
+ MSM_MUX_wlan,
+ MSM_MUX_NA,
+};
+
+static const char * const audio_pcm_groups[] = {
+ "gpio63", "gpio64", "gpio65", "gpio66"
+};
+
+static const char * const bt_groups[] = {
+ "gpio28", "gpio29", "gpio83"
+};
+
+static const char * const cam_mclk0_groups[] = {
+ "gpio5"
+};
+
+static const char * const cam_mclk1_groups[] = {
+ "gpio4"
+};
+
+static const char * const cam_mclk2_groups[] = {
+ "gpio2"
+};
+
+static const char * const codec_mic_i2s_groups[] = {
+ "gpio54", "gpio55", "gpio56", "gpio57", "gpio58"
+};
+
+static const char * const codec_spkr_i2s_groups[] = {
+ "gpio59", "gpio60", "gpio61", "gpio62"
+};
+
+static const char * const ext_gps_groups[] = {
+ "gpio22", "gpio23", "gpio24", "gpio25"
+};
+
+static const char * const fm_groups[] = {
+ "gpio26", "gpio27"
+};
+
+static const char * const gps_blanking_groups[] = {
+ "gpio137"
+};
+
+static const char * const gps_pps_in_groups[] = {
+ "gpio37"
+};
+
+static const char * const gps_pps_out_groups[] = {
+ "gpio37"
+};
+
+static const char * const gp_clk_0a_groups[] = {
+ "gpio3"
+};
+
+static const char * const gp_clk_0b_groups[] = {
+ "gpio54"
+};
+
+static const char * const gp_clk_1a_groups[] = {
+ "gpio4"
+};
+
+static const char * const gp_clk_1b_groups[] = {
+ "gpio70"
+};
+
+static const char * const gp_clk_2a_groups[] = {
+ "gpio52"
+};
+
+static const char * const gp_clk_2b_groups[] = {
+ "gpio37"
+};
+
+static const char * const gp_mn_groups[] = {
+ "gpio2"
+};
+
+static const char * const gp_pdm_0a_groups[] = {
+ "gpio58"
+};
+
+static const char * const gp_pdm_0b_groups[] = {
+ "gpio39"
+};
+
+static const char * const gp_pdm_1a_groups[] = {
+ "gpio94"
+};
+
+static const char * const gp_pdm_1b_groups[] = {
+ "gpio64"
+};
+
+static const char * const gp_pdm_2a_groups[] = {
+ "gpio69"
+};
+
+static const char * const gp_pdm_2b_groups[] = {
+ "gpio53"
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151"
+};
+
+static const char * const gsbi1_groups[] = {
+ "gpio6", "gpio7", "gpio8", "gpio9"
+};
+
+static const char * const gsbi1_spi_cs1_n_groups[] = {
+ "gpio14"
+};
+
+static const char * const gsbi1_spi_cs2a_n_groups[] = {
+ "gpio15"
+};
+
+static const char * const gsbi1_spi_cs2b_n_groups[] = {
+ "gpio17"
+};
+
+static const char * const gsbi1_spi_cs3_n_groups[] = {
+ "gpio16"
+};
+
+static const char * const gsbi2_groups[] = {
+ "gpio10", "gpio11", "gpio12", "gpio13"
+};
+
+static const char * const gsbi2_spi_cs1_n_groups[] = {
+ "gpio52"
+};
+
+static const char * const gsbi2_spi_cs2_n_groups[] = {
+ "gpio68"
+};
+
+static const char * const gsbi2_spi_cs3_n_groups[] = {
+ "gpio56"
+};
+
+static const char * const gsbi3_groups[] = {
+ "gpio14", "gpio15", "gpio16", "gpio17"
+};
+
+static const char * const gsbi4_groups[] = {
+ "gpio18", "gpio19", "gpio20", "gpio21"
+};
+
+static const char * const gsbi4_3d_cam_i2c_l_groups[] = {
+ "gpio18", "gpio19"
+};
+
+static const char * const gsbi4_3d_cam_i2c_r_groups[] = {
+ "gpio20", "gpio21"
+};
+
+static const char * const gsbi5_groups[] = {
+ "gpio22", "gpio23", "gpio24", "gpio25"
+};
+
+static const char * const gsbi5_3d_cam_i2c_l_groups[] = {
+ "gpio22", "gpio23"
+};
+
+static const char * const gsbi5_3d_cam_i2c_r_groups[] = {
+ "gpio24", "gpio25"
+};
+
+static const char * const gsbi6_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29"
+};
+
+static const char * const gsbi7_groups[] = {
+ "gpio30", "gpio31", "gpio32", "gpio33"
+};
+
+static const char * const gsbi8_groups[] = {
+ "gpio34", "gpio35", "gpio36", "gpio37"
+};
+
+static const char * const gsbi9_groups[] = {
+ "gpio93", "gpio94", "gpio95", "gpio96"
+};
+
+static const char * const gsbi10_groups[] = {
+ "gpio71", "gpio72", "gpio73", "gpio74"
+};
+
+static const char * const gsbi11_groups[] = {
+ "gpio38", "gpio39", "gpio40", "gpio41"
+};
+
+static const char * const gsbi11_spi_cs1a_n_groups[] = {
+ "gpio36"
+};
+
+static const char * const gsbi11_spi_cs1b_n_groups[] = {
+ "gpio18"
+};
+
+static const char * const gsbi11_spi_cs2a_n_groups[] = {
+ "gpio37"
+};
+
+static const char * const gsbi11_spi_cs2b_n_groups[] = {
+ "gpio19"
+};
+
+static const char * const gsbi11_spi_cs3_n_groups[] = {
+ "gpio76"
+};
+
+static const char * const gsbi12_groups[] = {
+ "gpio42", "gpio43", "gpio44", "gpio45"
+};
+
+static const char * const hdmi_cec_groups[] = {
+ "gpio99"
+};
+
+static const char * const hdmi_ddc_clock_groups[] = {
+ "gpio100"
+};
+
+static const char * const hdmi_ddc_data_groups[] = {
+ "gpio101"
+};
+
+static const char * const hdmi_hot_plug_detect_groups[] = {
+ "gpio102"
+};
+
+static const char * const hsic_groups[] = {
+ "gpio150", "gpio151"
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio0", "gpio1", "gpio19"
+};
+
+static const char * const mi2s_groups[] = {
+ "gpio47", "gpio48", "gpio49", "gpio50", "gpio51", "gpio52", "gpio53"
+};
+
+static const char * const mic_i2s_groups[] = {
+ "gpio71", "gpio72", "gpio73", "gpio74"
+};
+
+static const char * const pmb_clk_groups[] = {
+ "gpio21", "gpio86", "gpio112"
+};
+
+static const char * const pmb_ext_ctrl_groups[] = {
+ "gpio4", "gpio5"
+};
+
+static const char * const ps_hold_groups[] = {
+ "gpio108"
+};
+
+static const char * const rpm_wdog_groups[] = {
+ "gpio12"
+};
+
+static const char * const sdc2_groups[] = {
+ "gpio89", "gpio90", "gpio91", "gpio92", "gpio93", "gpio94", "gpio95",
+ "gpio96", "gpio97", "gpio98"
+};
+
+static const char * const sdc4_groups[] = {
+ "gpio83", "gpio84", "gpio85", "gpio86", "gpio87", "gpio88"
+};
+
+static const char * const sdc5_groups[] = {
+ "gpio77", "gpio78", "gpio79", "gpio80", "gpio81", "gpio82"
+};
+
+static const char * const slimbus1_groups[] = {
+ "gpio50", "gpio51", "gpio60", "gpio61"
+};
+
+static const char * const slimbus2_groups[] = {
+ "gpio42", "gpio43"
+};
+
+static const char * const spkr_i2s_groups[] = {
+ "gpio67", "gpio68", "gpio69", "gpio70"
+};
+
+static const char * const ssbi1_groups[] = {
+ "gpio141", "gpio143"
+};
+
+static const char * const ssbi2_groups[] = {
+ "gpio140", "gpio142"
+};
+
+static const char * const ssbi_ext_gps_groups[] = {
+ "gpio23"
+};
+
+static const char * const ssbi_pmic2_groups[] = {
+ "gpio149"
+};
+
+static const char * const ssbi_qpa1_groups[] = {
+ "gpio131"
+};
+
+static const char * const ssbi_ts_groups[] = {
+ "gpio10"
+};
+
+static const char * const tsif1_groups[] = {
+ "gpio75", "gpio76", "gpio77", "gpio82"
+};
+
+static const char * const tsif2_groups[] = {
+ "gpio78", "gpio79", "gpio80", "gpio81"
+};
+
+static const char * const ts_eoc_groups[] = {
+ "gpio11"
+};
+
+static const char * const usb_fs1_groups[] = {
+ "gpio32", "gpio33"
+};
+
+static const char * const usb_fs1_oe_groups[] = {
+ "gpio31"
+};
+
+static const char * const usb_fs1_oe_n_groups[] = {
+ "gpio31"
+};
+
+static const char * const usb_fs2_groups[] = {
+ "gpio34", "gpio35"
+};
+
+static const char * const usb_fs2_oe_groups[] = {
+ "gpio36"
+};
+
+static const char * const usb_fs2_oe_n_groups[] = {
+ "gpio36"
+};
+
+static const char * const vfe_camif_timer1_a_groups[] = {
+ "gpio2"
+};
+
+static const char * const vfe_camif_timer1_b_groups[] = {
+ "gpio38"
+};
+
+static const char * const vfe_camif_timer2_groups[] = {
+ "gpio3"
+};
+
+static const char * const vfe_camif_timer3_a_groups[] = {
+ "gpio4"
+};
+
+static const char * const vfe_camif_timer3_b_groups[] = {
+ "gpio151"
+};
+
+static const char * const vfe_camif_timer4_a_groups[] = {
+ "gpio65"
+};
+
+static const char * const vfe_camif_timer4_b_groups[] = {
+ "gpio150"
+};
+
+static const char * const vfe_camif_timer4_c_groups[] = {
+ "gpio10"
+};
+
+static const char * const vfe_camif_timer5_a_groups[] = {
+ "gpio66"
+};
+
+static const char * const vfe_camif_timer5_b_groups[] = {
+ "gpio39"
+};
+
+static const char * const vfe_camif_timer6_a_groups[] = {
+ "gpio71"
+};
+
+static const char * const vfe_camif_timer6_b_groups[] = {
+ "gpio0"
+};
+
+static const char * const vfe_camif_timer6_c_groups[] = {
+ "gpio18"
+};
+
+static const char * const vfe_camif_timer7_a_groups[] = {
+ "gpio67"
+};
+
+static const char * const vfe_camif_timer7_b_groups[] = {
+ "gpio1"
+};
+
+static const char * const vfe_camif_timer7_c_groups[] = {
+ "gpio19"
+};
+
+static const char * const wlan_groups[] = {
+ "gpio84", "gpio85", "gpio86", "gpio87", "gpio88"
+};
+
+static const struct msm_function msm8960_functions[] = {
+ FUNCTION(audio_pcm),
+ FUNCTION(bt),
+ FUNCTION(cam_mclk0),
+ FUNCTION(cam_mclk1),
+ FUNCTION(cam_mclk2),
+ FUNCTION(codec_mic_i2s),
+ FUNCTION(codec_spkr_i2s),
+ FUNCTION(ext_gps),
+ FUNCTION(fm),
+ FUNCTION(gps_blanking),
+ FUNCTION(gps_pps_in),
+ FUNCTION(gps_pps_out),
+ FUNCTION(gp_clk_0a),
+ FUNCTION(gp_clk_0b),
+ FUNCTION(gp_clk_1a),
+ FUNCTION(gp_clk_1b),
+ FUNCTION(gp_clk_2a),
+ FUNCTION(gp_clk_2b),
+ FUNCTION(gp_mn),
+ FUNCTION(gp_pdm_0a),
+ FUNCTION(gp_pdm_0b),
+ FUNCTION(gp_pdm_1a),
+ FUNCTION(gp_pdm_1b),
+ FUNCTION(gp_pdm_2a),
+ FUNCTION(gp_pdm_2b),
+ FUNCTION(gpio),
+ FUNCTION(gsbi1),
+ FUNCTION(gsbi1_spi_cs1_n),
+ FUNCTION(gsbi1_spi_cs2a_n),
+ FUNCTION(gsbi1_spi_cs2b_n),
+ FUNCTION(gsbi1_spi_cs3_n),
+ FUNCTION(gsbi2),
+ FUNCTION(gsbi2_spi_cs1_n),
+ FUNCTION(gsbi2_spi_cs2_n),
+ FUNCTION(gsbi2_spi_cs3_n),
+ FUNCTION(gsbi3),
+ FUNCTION(gsbi4),
+ FUNCTION(gsbi4_3d_cam_i2c_l),
+ FUNCTION(gsbi4_3d_cam_i2c_r),
+ FUNCTION(gsbi5),
+ FUNCTION(gsbi5_3d_cam_i2c_l),
+ FUNCTION(gsbi5_3d_cam_i2c_r),
+ FUNCTION(gsbi6),
+ FUNCTION(gsbi7),
+ FUNCTION(gsbi8),
+ FUNCTION(gsbi9),
+ FUNCTION(gsbi10),
+ FUNCTION(gsbi11),
+ FUNCTION(gsbi11_spi_cs1a_n),
+ FUNCTION(gsbi11_spi_cs1b_n),
+ FUNCTION(gsbi11_spi_cs2a_n),
+ FUNCTION(gsbi11_spi_cs2b_n),
+ FUNCTION(gsbi11_spi_cs3_n),
+ FUNCTION(gsbi12),
+ FUNCTION(hdmi_cec),
+ FUNCTION(hdmi_ddc_clock),
+ FUNCTION(hdmi_ddc_data),
+ FUNCTION(hdmi_hot_plug_detect),
+ FUNCTION(hsic),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mi2s),
+ FUNCTION(mic_i2s),
+ FUNCTION(pmb_clk),
+ FUNCTION(pmb_ext_ctrl),
+ FUNCTION(ps_hold),
+ FUNCTION(rpm_wdog),
+ FUNCTION(sdc2),
+ FUNCTION(sdc4),
+ FUNCTION(sdc5),
+ FUNCTION(slimbus1),
+ FUNCTION(slimbus2),
+ FUNCTION(spkr_i2s),
+ FUNCTION(ssbi1),
+ FUNCTION(ssbi2),
+ FUNCTION(ssbi_ext_gps),
+ FUNCTION(ssbi_pmic2),
+ FUNCTION(ssbi_qpa1),
+ FUNCTION(ssbi_ts),
+ FUNCTION(tsif1),
+ FUNCTION(tsif2),
+ FUNCTION(ts_eoc),
+ FUNCTION(usb_fs1),
+ FUNCTION(usb_fs1_oe),
+ FUNCTION(usb_fs1_oe_n),
+ FUNCTION(usb_fs2),
+ FUNCTION(usb_fs2_oe),
+ FUNCTION(usb_fs2_oe_n),
+ FUNCTION(vfe_camif_timer1_a),
+ FUNCTION(vfe_camif_timer1_b),
+ FUNCTION(vfe_camif_timer2),
+ FUNCTION(vfe_camif_timer3_a),
+ FUNCTION(vfe_camif_timer3_b),
+ FUNCTION(vfe_camif_timer4_a),
+ FUNCTION(vfe_camif_timer4_b),
+ FUNCTION(vfe_camif_timer4_c),
+ FUNCTION(vfe_camif_timer5_a),
+ FUNCTION(vfe_camif_timer5_b),
+ FUNCTION(vfe_camif_timer6_a),
+ FUNCTION(vfe_camif_timer6_b),
+ FUNCTION(vfe_camif_timer6_c),
+ FUNCTION(vfe_camif_timer7_a),
+ FUNCTION(vfe_camif_timer7_b),
+ FUNCTION(vfe_camif_timer7_c),
+ FUNCTION(wlan),
+};
+
+static const struct msm_pingroup msm8960_groups[] = {
+ PINGROUP(0, mdp_vsync, vfe_camif_timer6_b, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(1, mdp_vsync, vfe_camif_timer7_b, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(2, vfe_camif_timer1_a, gp_mn, NA, cam_mclk2, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(3, vfe_camif_timer2, gp_clk_0a, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(4, vfe_camif_timer3_a, cam_mclk1, gp_clk_1a, pmb_ext_ctrl, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(5, cam_mclk0, pmb_ext_ctrl, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(6, gsbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(7, gsbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(8, gsbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(9, gsbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(10, gsbi2, ssbi_ts, NA, vfe_camif_timer4_c, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(11, gsbi2, ts_eoc, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(12, gsbi2, rpm_wdog, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(13, gsbi2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(14, gsbi3, gsbi1_spi_cs1_n, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(15, gsbi3, gsbi1_spi_cs2a_n, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(16, gsbi3, gsbi1_spi_cs3_n, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(17, gsbi3, gsbi1_spi_cs2b_n, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(18, gsbi4, gsbi11_spi_cs1b_n, NA, NA, gsbi4_3d_cam_i2c_l, vfe_camif_timer6_c, NA, NA, NA, NA, NA),
+ PINGROUP(19, gsbi4, gsbi11_spi_cs2b_n, NA, mdp_vsync, NA, gsbi4_3d_cam_i2c_l, vfe_camif_timer7_c, NA, NA, NA, NA),
+ PINGROUP(20, gsbi4, gsbi4_3d_cam_i2c_r, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(21, gsbi4, pmb_clk, gsbi4_3d_cam_i2c_r, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(22, gsbi5, ext_gps, NA, NA, NA, NA, NA, NA, NA, gsbi5_3d_cam_i2c_l, NA),
+ PINGROUP(23, gsbi5, ssbi_ext_gps, NA, NA, NA, NA, NA, NA, NA, gsbi5_3d_cam_i2c_l, NA),
+ PINGROUP(24, gsbi5, ext_gps, NA, NA, NA, NA, NA, NA, NA, gsbi5_3d_cam_i2c_r, NA),
+ PINGROUP(25, gsbi5, ext_gps, NA, NA, NA, NA, NA, NA, NA, gsbi5_3d_cam_i2c_r, NA),
+ PINGROUP(26, fm, gsbi6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(27, fm, gsbi6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(28, bt, gsbi6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(29, bt, gsbi6, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(30, gsbi7, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(31, gsbi7, usb_fs1_oe, usb_fs1_oe_n, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(32, gsbi7, usb_fs1, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, gsbi7, usb_fs1, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(34, gsbi8, usb_fs2, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(35, gsbi8, usb_fs2, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(36, gsbi8, usb_fs2_oe, usb_fs2_oe_n, gsbi11_spi_cs1a_n, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(37, gsbi8, gps_pps_out, gps_pps_in, gsbi11_spi_cs2a_n, gp_clk_2b, NA, NA, NA, NA, NA, NA),
+ PINGROUP(38, gsbi11, NA, NA, NA, NA, NA, NA, NA, NA, vfe_camif_timer1_b, NA),
+ PINGROUP(39, gsbi11, gp_pdm_0b, NA, NA, NA, NA, NA, NA, NA, NA, vfe_camif_timer5_b),
+ PINGROUP(40, gsbi11, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(41, gsbi11, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(42, gsbi12, slimbus2, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(43, gsbi12, slimbus2, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(44, gsbi12, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(45, gsbi12, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(46, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(47, mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(48, mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(49, mi2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(50, mi2s, slimbus1, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(51, mi2s, slimbus1, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(52, mi2s, gp_clk_2a, gsbi2_spi_cs1_n, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(53, mi2s, gp_pdm_2b, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(54, codec_mic_i2s, gp_clk_0b, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(55, codec_mic_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(56, codec_mic_i2s, gsbi2_spi_cs3_n, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(57, codec_mic_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(58, codec_mic_i2s, gp_pdm_0a, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(59, codec_spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(60, slimbus1, codec_spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(61, slimbus1, codec_spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(62, codec_spkr_i2s, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(63, audio_pcm, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(64, audio_pcm, gp_pdm_1b, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(65, audio_pcm, vfe_camif_timer4_a, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(66, audio_pcm, vfe_camif_timer5_a, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(67, spkr_i2s, vfe_camif_timer7_a, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(68, spkr_i2s, gsbi2_spi_cs2_n, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(69, spkr_i2s, gp_pdm_2a, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(70, spkr_i2s, gp_clk_1b, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(71, mic_i2s, gsbi10, vfe_camif_timer6_a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(72, mic_i2s, gsbi10, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(73, mic_i2s, gsbi10, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(74, mic_i2s, gsbi10, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(75, tsif1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(76, tsif1, gsbi11_spi_cs3_n, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(77, tsif1, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(78, tsif2, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(79, tsif2, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(80, tsif2, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(81, tsif2, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(82, tsif1, sdc5, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(83, bt, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(84, wlan, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(85, wlan, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(86, wlan, sdc4, pmb_clk, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(87, wlan, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(88, wlan, sdc4, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(89, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(90, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(91, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(92, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(93, sdc2, gsbi9, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(94, sdc2, gsbi9, gp_pdm_1a, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(95, sdc2, gsbi9, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(96, sdc2, gsbi9, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(97, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(98, sdc2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(99, hdmi_cec, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(100, hdmi_ddc_clock, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(101, hdmi_ddc_data, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(102, hdmi_hot_plug_detect, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(103, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(104, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(105, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(106, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(107, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(108, ps_hold, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(109, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(110, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(111, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(112, NA, pmb_clk, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(113, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(116, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(117, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(118, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(119, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(120, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(121, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(122, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(123, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(124, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(125, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(126, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(127, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(128, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(129, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(130, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(131, NA, ssbi_qpa1, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(132, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(133, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(134, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(135, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(136, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(137, gps_blanking, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(138, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(139, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(140, ssbi2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(141, ssbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(142, ssbi2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(143, ssbi1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(144, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(145, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(146, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(147, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(148, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(149, ssbi_pmic2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(150, hsic, NA, vfe_camif_timer4_b, NA, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(151, hsic, NA, vfe_camif_timer3_b, NA, NA, NA, NA, NA, NA, NA, NA),
+
+ SDC_PINGROUP(sdc1_clk, 0x20a0, 13, 6),
+ SDC_PINGROUP(sdc1_cmd, 0x20a0, 11, 3),
+ SDC_PINGROUP(sdc1_data, 0x20a0, 9, 0),
+
+ SDC_PINGROUP(sdc3_clk, 0x20a4, 14, 6),
+ SDC_PINGROUP(sdc3_cmd, 0x20a4, 11, 3),
+ SDC_PINGROUP(sdc3_data, 0x20a4, 9, 0),
+};
+
+#define NUM_GPIO_PINGROUPS 152
+
+static const struct msm_pinctrl_soc_data msm8960_pinctrl = {
+ .pins = msm8960_pins,
+ .npins = ARRAY_SIZE(msm8960_pins),
+ .functions = msm8960_functions,
+ .nfunctions = ARRAY_SIZE(msm8960_functions),
+ .groups = msm8960_groups,
+ .ngroups = ARRAY_SIZE(msm8960_groups),
+ .ngpios = NUM_GPIO_PINGROUPS,
+};
+
+static int msm8960_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &msm8960_pinctrl);
+}
+
+static const struct of_device_id msm8960_pinctrl_of_match[] = {
+ { .compatible = "qcom,msm8960-pinctrl", },
+ { },
+};
+
+static struct platform_driver msm8960_pinctrl_driver = {
+ .driver = {
+ .name = "msm8960-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = msm8960_pinctrl_of_match,
+ },
+ .probe = msm8960_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init msm8960_pinctrl_init(void)
+{
+ return platform_driver_register(&msm8960_pinctrl_driver);
+}
+arch_initcall(msm8960_pinctrl_init);
+
+static void __exit msm8960_pinctrl_exit(void)
+{
+ platform_driver_unregister(&msm8960_pinctrl_driver);
+}
+module_exit(msm8960_pinctrl_exit);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
+MODULE_DESCRIPTION("Qualcomm MSM8960 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msm8960_pinctrl_of_match);
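
The FUNCTION() and PINGROUP() tables above are generated by macros defined earlier in this file, outside this excerpt. As a rough, hypothetical sketch of the pattern only: each function name is stringified and paired with a same-named <func>_groups[] array. The struct and field names below are illustrative assumptions, not the driver's real definitions, which also carry per-pin register data.

#include <stddef.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct demo_function {
	const char *name;
	const char * const *groups;
	unsigned int ngroups;
};

/* Stringify the function name and pair it with its <name>_groups[] list. */
#define DEMO_FUNCTION(fname)					\
	{							\
		.name = #fname,					\
		.groups = fname##_groups,			\
		.ngroups = ARRAY_SIZE(fname##_groups),		\
	}

const char * const sdc2_groups[] = { "gpio89", "gpio90", "gpio91" };
const char * const wlan_groups[] = { "gpio84", "gpio85", "gpio86" };

const struct demo_function demo_functions[] = {
	DEMO_FUNCTION(sdc2),
	DEMO_FUNCTION(wlan),
};
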
diff --git a/drivers/pinctrl/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index 418306911a6f..8c9720154d1e 100644
--- a/drivers/pinctrl/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -342,7 +342,7 @@ static const unsigned int sdc2_data_pins[] = { 151 };
.pins = gpio##id##_pins, \
.npins = ARRAY_SIZE(gpio##id##_pins), \
.funcs = (int[]){ \
- MSM_MUX_NA, /* gpio mode */ \
+ MSM_MUX_gpio, \
MSM_MUX_##f1, \
MSM_MUX_##f2, \
MSM_MUX_##f3, \
@@ -402,6 +402,7 @@ static const unsigned int sdc2_data_pins[] = { 151 };
* the pingroup table below.
*/
enum msm8x74_functions {
+ MSM_MUX_gpio,
MSM_MUX_cci_i2c0,
MSM_MUX_cci_i2c1,
MSM_MUX_blsp_i2c1,
@@ -509,6 +510,31 @@ enum msm8x74_functions {
MSM_MUX_NA,
};
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145"
+};
+
static const char * const blsp_uart1_groups[] = {
"gpio0", "gpio1", "gpio2", "gpio3"
};
@@ -728,6 +754,7 @@ static const char * const wlan_groups[] = {
static const char * const slimbus_groups[] = { "gpio70", "gpio71" };
static const struct msm_function msm8x74_functions[] = {
+ FUNCTION(gpio),
FUNCTION(cci_i2c0),
FUNCTION(cci_i2c1),
FUNCTION(uim1),
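
The msm8x74 change above replaces the MSM_MUX_NA placeholder in slot 0 of every group's funcs[] with a real MSM_MUX_gpio function whose gpio_groups[] names every gpioN group. This works because the pinmux core resolves a requested function by scanning a group's funcs[] table for a matching entry and programming that index; the lookup below is a hedged, standalone sketch of that idea, not the driver's actual code.

/* Return the FUNC_SEL index for 'wanted' in this group's funcs[] list,
 * or -1 if the function cannot be routed onto this pin. */
int demo_find_mux_val(const int *funcs, unsigned int nfuncs, int wanted)
{
	unsigned int i;

	for (i = 0; i < nfuncs; i++)
		if (funcs[i] == wanted)
			return (int)i;

	return -1;
}
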
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
new file mode 100644
index 000000000000..d0461cd5d707
--- /dev/null
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -0,0 +1,28 @@
+#
+# Samsung Pin control drivers
+#
+config PINCTRL_SAMSUNG
+ bool
+ select PINMUX
+ select PINCONF
+
+config PINCTRL_EXYNOS
+ bool "Pinctrl driver data for Samsung EXYNOS SoCs other than 5440"
+ depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
+ select PINCTRL_SAMSUNG
+
+config PINCTRL_EXYNOS5440
+ bool "Samsung EXYNOS5440 SoC pinctrl driver"
+ depends on SOC_EXYNOS5440
+ select PINMUX
+ select PINCONF
+
+config PINCTRL_S3C24XX
+ bool "Samsung S3C24XX SoC pinctrl driver"
+ depends on ARCH_S3C24XX
+ select PINCTRL_SAMSUNG
+
+config PINCTRL_S3C64XX
+ bool "Samsung S3C64XX SoC pinctrl driver"
+ depends on ARCH_S3C64XX
+ select PINCTRL_SAMSUNG
diff --git a/drivers/pinctrl/samsung/Makefile b/drivers/pinctrl/samsung/Makefile
new file mode 100644
index 000000000000..70160c059edd
--- /dev/null
+++ b/drivers/pinctrl/samsung/Makefile
@@ -0,0 +1,7 @@
+# Samsung pin control drivers
+
+obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
+obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
+obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinctrl-exynos5440.o
+obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o
+obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 9609c23834ce..003bfd874a61 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -33,6 +33,18 @@
#include "pinctrl-samsung.h"
#include "pinctrl-exynos.h"
+struct exynos_irq_chip {
+ struct irq_chip chip;
+
+ u32 eint_con;
+ u32 eint_mask;
+ u32 eint_pend;
+};
+
+static inline struct exynos_irq_chip *to_exynos_irq_chip(struct irq_chip *chip)
+{
+ return container_of(chip, struct exynos_irq_chip, chip);
+}
static struct samsung_pin_bank_type bank_type_off = {
.fld_width = { 4, 1, 2, 2, 2, 2, },
@@ -50,11 +62,13 @@ static const struct of_device_id exynos_wkup_irq_ids[] = {
{ }
};
-static void exynos_gpio_irq_mask(struct irq_data *irqd)
+static void exynos_irq_mask(struct irq_data *irqd)
{
+ struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+ struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
struct samsung_pinctrl_drv_data *d = bank->drvdata;
- unsigned long reg_mask = d->ctrl->geint_mask + bank->eint_offset;
+ unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
unsigned long mask;
unsigned long flags;
@@ -67,20 +81,24 @@ static void exynos_gpio_irq_mask(struct irq_data *irqd)
spin_unlock_irqrestore(&bank->slock, flags);
}
-static void exynos_gpio_irq_ack(struct irq_data *irqd)
+static void exynos_irq_ack(struct irq_data *irqd)
{
+ struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+ struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
struct samsung_pinctrl_drv_data *d = bank->drvdata;
- unsigned long reg_pend = d->ctrl->geint_pend + bank->eint_offset;
+ unsigned long reg_pend = our_chip->eint_pend + bank->eint_offset;
writel(1 << irqd->hwirq, d->virt_base + reg_pend);
}
-static void exynos_gpio_irq_unmask(struct irq_data *irqd)
+static void exynos_irq_unmask(struct irq_data *irqd)
{
+ struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+ struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
struct samsung_pinctrl_drv_data *d = bank->drvdata;
- unsigned long reg_mask = d->ctrl->geint_mask + bank->eint_offset;
+ unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
unsigned long mask;
unsigned long flags;
@@ -93,7 +111,7 @@ static void exynos_gpio_irq_unmask(struct irq_data *irqd)
* masked.
*/
if (irqd_get_trigger_type(irqd) & IRQ_TYPE_LEVEL_MASK)
- exynos_gpio_irq_ack(irqd);
+ exynos_irq_ack(irqd);
spin_lock_irqsave(&bank->slock, flags);
@@ -104,16 +122,17 @@ static void exynos_gpio_irq_unmask(struct irq_data *irqd)
spin_unlock_irqrestore(&bank->slock, flags);
}
-static int exynos_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
+static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
{
+ struct irq_chip *chip = irq_data_get_irq_chip(irqd);
+ struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
struct samsung_pin_bank_type *bank_type = bank->type;
struct samsung_pinctrl_drv_data *d = bank->drvdata;
- struct samsung_pin_ctrl *ctrl = d->ctrl;
unsigned int pin = irqd->hwirq;
unsigned int shift = EXYNOS_EINT_CON_LEN * pin;
unsigned int con, trig_type;
- unsigned long reg_con = ctrl->geint_con + bank->eint_offset;
+ unsigned long reg_con = our_chip->eint_con + bank->eint_offset;
unsigned long flags;
unsigned int mask;
@@ -167,12 +186,17 @@ static int exynos_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
/*
* irq_chip for gpio interrupts.
*/
-static struct irq_chip exynos_gpio_irq_chip = {
- .name = "exynos_gpio_irq_chip",
- .irq_unmask = exynos_gpio_irq_unmask,
- .irq_mask = exynos_gpio_irq_mask,
- .irq_ack = exynos_gpio_irq_ack,
- .irq_set_type = exynos_gpio_irq_set_type,
+static struct exynos_irq_chip exynos_gpio_irq_chip = {
+ .chip = {
+ .name = "exynos_gpio_irq_chip",
+ .irq_unmask = exynos_irq_unmask,
+ .irq_mask = exynos_irq_mask,
+ .irq_ack = exynos_irq_ack,
+ .irq_set_type = exynos_irq_set_type,
+ },
+ .eint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .eint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .eint_pend = EXYNOS_GPIO_EPEND_OFFSET,
};
static int exynos_gpio_irq_map(struct irq_domain *h, unsigned int virq,
@@ -181,7 +205,7 @@ static int exynos_gpio_irq_map(struct irq_domain *h, unsigned int virq,
struct samsung_pin_bank *b = h->host_data;
irq_set_chip_data(virq, b);
- irq_set_chip_and_handler(virq, &exynos_gpio_irq_chip,
+ irq_set_chip_and_handler(virq, &exynos_gpio_irq_chip.chip,
handle_level_irq);
set_irq_flags(virq, IRQF_VALID);
return 0;
@@ -202,7 +226,7 @@ static irqreturn_t exynos_eint_gpio_irq(int irq, void *data)
struct samsung_pin_bank *bank = ctrl->pin_banks;
unsigned int svc, group, pin, virq;
- svc = readl(d->virt_base + ctrl->svc);
+ svc = readl(d->virt_base + EXYNOS_SVC_OFFSET);
group = EXYNOS_SVC_GROUP(svc);
pin = svc & EXYNOS_SVC_NUM_MASK;
@@ -279,119 +303,6 @@ err_domains:
return ret;
}
-static void exynos_wkup_irq_mask(struct irq_data *irqd)
-{
- struct samsung_pin_bank *b = irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = b->drvdata;
- unsigned long reg_mask = d->ctrl->weint_mask + b->eint_offset;
- unsigned long mask;
- unsigned long flags;
-
- spin_lock_irqsave(&b->slock, flags);
-
- mask = readl(d->virt_base + reg_mask);
- mask |= 1 << irqd->hwirq;
- writel(mask, d->virt_base + reg_mask);
-
- spin_unlock_irqrestore(&b->slock, flags);
-}
-
-static void exynos_wkup_irq_ack(struct irq_data *irqd)
-{
- struct samsung_pin_bank *b = irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = b->drvdata;
- unsigned long pend = d->ctrl->weint_pend + b->eint_offset;
-
- writel(1 << irqd->hwirq, d->virt_base + pend);
-}
-
-static void exynos_wkup_irq_unmask(struct irq_data *irqd)
-{
- struct samsung_pin_bank *b = irq_data_get_irq_chip_data(irqd);
- struct samsung_pinctrl_drv_data *d = b->drvdata;
- unsigned long reg_mask = d->ctrl->weint_mask + b->eint_offset;
- unsigned long mask;
- unsigned long flags;
-
- /*
- * Ack level interrupts right before unmask
- *
- * If we don't do this we'll get a double-interrupt. Level triggered
- * interrupts must not fire an interrupt if the level is not
- * _currently_ active, even if it was active while the interrupt was
- * masked.
- */
- if (irqd_get_trigger_type(irqd) & IRQ_TYPE_LEVEL_MASK)
- exynos_wkup_irq_ack(irqd);
-
- spin_lock_irqsave(&b->slock, flags);
-
- mask = readl(d->virt_base + reg_mask);
- mask &= ~(1 << irqd->hwirq);
- writel(mask, d->virt_base + reg_mask);
-
- spin_unlock_irqrestore(&b->slock, flags);
-}
-
-static int exynos_wkup_irq_set_type(struct irq_data *irqd, unsigned int type)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
- struct samsung_pin_bank_type *bank_type = bank->type;
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
- unsigned int pin = irqd->hwirq;
- unsigned long reg_con = d->ctrl->weint_con + bank->eint_offset;
- unsigned long shift = EXYNOS_EINT_CON_LEN * pin;
- unsigned long con, trig_type;
- unsigned long flags;
- unsigned int mask;
-
- switch (type) {
- case IRQ_TYPE_EDGE_RISING:
- trig_type = EXYNOS_EINT_EDGE_RISING;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- trig_type = EXYNOS_EINT_EDGE_FALLING;
- break;
- case IRQ_TYPE_EDGE_BOTH:
- trig_type = EXYNOS_EINT_EDGE_BOTH;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- trig_type = EXYNOS_EINT_LEVEL_HIGH;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- trig_type = EXYNOS_EINT_LEVEL_LOW;
- break;
- default:
- pr_err("unsupported external interrupt type\n");
- return -EINVAL;
- }
-
- if (type & IRQ_TYPE_EDGE_BOTH)
- __irq_set_handler_locked(irqd->irq, handle_edge_irq);
- else
- __irq_set_handler_locked(irqd->irq, handle_level_irq);
-
- con = readl(d->virt_base + reg_con);
- con &= ~(EXYNOS_EINT_CON_MASK << shift);
- con |= trig_type << shift;
- writel(con, d->virt_base + reg_con);
-
- reg_con = bank->pctl_offset + bank_type->reg_offset[PINCFG_TYPE_FUNC];
- shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC];
- mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
-
- spin_lock_irqsave(&bank->slock, flags);
-
- con = readl(d->virt_base + reg_con);
- con &= ~(mask << shift);
- con |= EXYNOS_EINT_FUNC << shift;
- writel(con, d->virt_base + reg_con);
-
- spin_unlock_irqrestore(&bank->slock, flags);
-
- return 0;
-}
-
static u32 exynos_eint_wake_mask = 0xffffffff;
u32 exynos_get_eint_wake_mask(void)
@@ -417,13 +328,18 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on)
/*
* irq_chip for wakeup interrupts
*/
-static struct irq_chip exynos_wkup_irq_chip = {
- .name = "exynos_wkup_irq_chip",
- .irq_unmask = exynos_wkup_irq_unmask,
- .irq_mask = exynos_wkup_irq_mask,
- .irq_ack = exynos_wkup_irq_ack,
- .irq_set_type = exynos_wkup_irq_set_type,
- .irq_set_wake = exynos_wkup_irq_set_wake,
+static struct exynos_irq_chip exynos_wkup_irq_chip = {
+ .chip = {
+ .name = "exynos_wkup_irq_chip",
+ .irq_unmask = exynos_irq_unmask,
+ .irq_mask = exynos_irq_mask,
+ .irq_ack = exynos_irq_ack,
+ .irq_set_type = exynos_irq_set_type,
+ .irq_set_wake = exynos_wkup_irq_set_wake,
+ },
+ .eint_con = EXYNOS_WKUP_ECON_OFFSET,
+ .eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
+ .eint_pend = EXYNOS_WKUP_EPEND_OFFSET,
};
/* interrupt handler for wakeup interrupts 0..15 */
@@ -464,7 +380,6 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
struct irq_chip *chip = irq_get_chip(irq);
struct exynos_muxed_weint_data *eintd = irq_get_handler_data(irq);
struct samsung_pinctrl_drv_data *d = eintd->banks[0]->drvdata;
- struct samsung_pin_ctrl *ctrl = d->ctrl;
unsigned long pend;
unsigned long mask;
int i;
@@ -473,8 +388,10 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
for (i = 0; i < eintd->nr_banks; ++i) {
struct samsung_pin_bank *b = eintd->banks[i];
- pend = readl(d->virt_base + ctrl->weint_pend + b->eint_offset);
- mask = readl(d->virt_base + ctrl->weint_mask + b->eint_offset);
+ pend = readl(d->virt_base + EXYNOS_WKUP_EPEND_OFFSET
+ + b->eint_offset);
+ mask = readl(d->virt_base + EXYNOS_WKUP_EMASK_OFFSET
+ + b->eint_offset);
exynos_irq_demux_eint(pend & ~mask, b->irq_domain);
}
@@ -484,7 +401,8 @@ static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
static int exynos_wkup_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
- irq_set_chip_and_handler(virq, &exynos_wkup_irq_chip, handle_level_irq);
+ irq_set_chip_and_handler(virq, &exynos_wkup_irq_chip.chip,
+ handle_level_irq);
irq_set_chip_data(virq, h->host_data);
set_irq_flags(virq, IRQF_VALID);
return 0;
@@ -703,13 +621,6 @@ struct samsung_pin_ctrl s5pv210_pin_ctrl[] = {
/* pin-controller instance 0 data */
.pin_banks = s5pv210_pin_bank,
.nr_banks = ARRAY_SIZE(s5pv210_pin_bank),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .weint_con = EXYNOS_WKUP_ECON_OFFSET,
- .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
- .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
@@ -758,10 +669,6 @@ struct samsung_pin_ctrl exynos3250_pin_ctrl[] = {
/* pin-controller instance 0 data */
.pin_banks = exynos3250_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos3250_pin_banks0),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
@@ -770,13 +677,6 @@ struct samsung_pin_ctrl exynos3250_pin_ctrl[] = {
/* pin-controller instance 1 data */
.pin_banks = exynos3250_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos3250_pin_banks1),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .weint_con = EXYNOS_WKUP_ECON_OFFSET,
- .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
- .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
@@ -843,10 +743,6 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
/* pin-controller instance 0 data */
.pin_banks = exynos4210_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos4210_pin_banks0),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
@@ -855,13 +751,6 @@ struct samsung_pin_ctrl exynos4210_pin_ctrl[] = {
/* pin-controller instance 1 data */
.pin_banks = exynos4210_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos4210_pin_banks1),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .weint_con = EXYNOS_WKUP_ECON_OFFSET,
- .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
- .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
@@ -942,10 +831,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
/* pin-controller instance 0 data */
.pin_banks = exynos4x12_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos4x12_pin_banks0),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
@@ -954,13 +839,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
/* pin-controller instance 1 data */
.pin_banks = exynos4x12_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos4x12_pin_banks1),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .weint_con = EXYNOS_WKUP_ECON_OFFSET,
- .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
- .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
@@ -970,10 +848,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
/* pin-controller instance 2 data */
.pin_banks = exynos4x12_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos4x12_pin_banks2),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
@@ -982,10 +856,6 @@ struct samsung_pin_ctrl exynos4x12_pin_ctrl[] = {
/* pin-controller instance 3 data */
.pin_banks = exynos4x12_pin_banks3,
.nr_banks = ARRAY_SIZE(exynos4x12_pin_banks3),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
@@ -1058,13 +928,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
/* pin-controller instance 0 data */
.pin_banks = exynos5250_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos5250_pin_banks0),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .weint_con = EXYNOS_WKUP_ECON_OFFSET,
- .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
- .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
.suspend = exynos_pinctrl_suspend,
@@ -1074,10 +937,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
/* pin-controller instance 1 data */
.pin_banks = exynos5250_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5250_pin_banks1),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
@@ -1086,10 +945,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
/* pin-controller instance 2 data */
.pin_banks = exynos5250_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5250_pin_banks2),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
@@ -1098,10 +953,6 @@ struct samsung_pin_ctrl exynos5250_pin_ctrl[] = {
/* pin-controller instance 3 data */
.pin_banks = exynos5250_pin_banks3,
.nr_banks = ARRAY_SIZE(exynos5250_pin_banks3),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.suspend = exynos_pinctrl_suspend,
.resume = exynos_pinctrl_resume,
@@ -1158,13 +1009,6 @@ struct samsung_pin_ctrl exynos5260_pin_ctrl[] = {
/* pin-controller instance 0 data */
.pin_banks = exynos5260_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks0),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .weint_con = EXYNOS_WKUP_ECON_OFFSET,
- .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
- .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
.label = "exynos5260-gpio-ctrl0",
@@ -1172,20 +1016,12 @@ struct samsung_pin_ctrl exynos5260_pin_ctrl[] = {
/* pin-controller instance 1 data */
.pin_banks = exynos5260_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks1),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.label = "exynos5260-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5260_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5260_pin_banks2),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.label = "exynos5260-gpio-ctrl2",
},
@@ -1256,13 +1092,6 @@ struct samsung_pin_ctrl exynos5420_pin_ctrl[] = {
/* pin-controller instance 0 data */
.pin_banks = exynos5420_pin_banks0,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks0),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .weint_con = EXYNOS_WKUP_ECON_OFFSET,
- .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
- .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.eint_wkup_init = exynos_eint_wkup_init,
.label = "exynos5420-gpio-ctrl0",
@@ -1270,40 +1099,24 @@ struct samsung_pin_ctrl exynos5420_pin_ctrl[] = {
/* pin-controller instance 1 data */
.pin_banks = exynos5420_pin_banks1,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks1),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.label = "exynos5420-gpio-ctrl1",
}, {
/* pin-controller instance 2 data */
.pin_banks = exynos5420_pin_banks2,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks2),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.label = "exynos5420-gpio-ctrl2",
}, {
/* pin-controller instance 3 data */
.pin_banks = exynos5420_pin_banks3,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks3),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.label = "exynos5420-gpio-ctrl3",
}, {
/* pin-controller instance 4 data */
.pin_banks = exynos5420_pin_banks4,
.nr_banks = ARRAY_SIZE(exynos5420_pin_banks4),
- .geint_con = EXYNOS_GPIO_ECON_OFFSET,
- .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
- .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
- .svc = EXYNOS_SVC_OFFSET,
.eint_gpio_init = exynos_eint_gpio_init,
.label = "exynos5420-gpio-ctrl4",
},
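
The pinctrl-exynos.c rework above folds the per-SoC geint_*/weint_* offsets into two static exynos_irq_chip wrappers and recovers the wrapper from the generic irq_chip with container_of(). Below is a self-contained sketch of that embed-and-recover pattern, using made-up demo types rather than the kernel structures.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_chip {
	const char *name;
};

struct demo_eint_chip {
	struct demo_chip chip;		/* embedded generic part */
	unsigned int eint_mask;		/* per-variant register offset */
};

/* Shared handler: sees only the generic chip, recovers the wrapper. */
void demo_mask(struct demo_chip *chip)
{
	struct demo_eint_chip *our =
		container_of(chip, struct demo_eint_chip, chip);

	printf("%s: mask register at 0x%x\n", chip->name, our->eint_mask);
}

int main(void)
{
	struct demo_eint_chip gpio = {
		.chip = { .name = "gpio-eint" },
		.eint_mask = 0x900,
	};

	demo_mask(&gpio.chip);
	return 0;
}
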
diff --git a/drivers/pinctrl/pinctrl-exynos.h b/drivers/pinctrl/samsung/pinctrl-exynos.h
index 3c91c357792f..3c91c357792f 100644
--- a/drivers/pinctrl/pinctrl-exynos.h
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.h
diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index 8fe2ab0a7698..603da2f9dd95 100644
--- a/drivers/pinctrl/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -23,7 +23,7 @@
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
-#include "core.h"
+#include "../core.h"
/* EXYNOS5440 GPIO and Pinctrl register offsets */
#define GPIO_MUX 0x00
@@ -371,13 +371,6 @@ static int exynos5440_pinmux_enable(struct pinctrl_dev *pctldev, unsigned select
return 0;
}
-/* disable a specified pinmux by writing to registers */
-static void exynos5440_pinmux_disable(struct pinctrl_dev *pctldev,
- unsigned selector, unsigned group)
-{
- exynos5440_pinmux_setup(pctldev, selector, group, false);
-}
-
/*
* The calls to gpio_direction_output() and gpio_direction_input()
* leads to this function call (via the pinctrl_gpio_direction_{input|output}()
@@ -395,7 +388,6 @@ static const struct pinmux_ops exynos5440_pinmux_ops = {
.get_function_name = exynos5440_pinmux_get_fname,
.get_function_groups = exynos5440_pinmux_get_groups,
.enable = exynos5440_pinmux_enable,
- .disable = exynos5440_pinmux_disable,
.gpio_set_direction = exynos5440_pinmux_gpio_set_direction,
};
diff --git a/drivers/pinctrl/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index ad3eaad17001..ad3eaad17001 100644
--- a/drivers/pinctrl/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
diff --git a/drivers/pinctrl/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index 89143c903000..89143c903000 100644
--- a/drivers/pinctrl/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 3e61d0f8f146..b07406da333c 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -30,7 +30,7 @@
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
-#include "core.h"
+#include "../core.h"
#include "pinctrl-samsung.h"
#define GROUP_SUFFIX "-grp"
@@ -40,13 +40,14 @@
/* list of all possible config options supported */
static struct pin_config {
- char *prop_cfg;
- unsigned int cfg_type;
-} pcfgs[] = {
+ const char *property;
+ enum pincfg_type param;
+} cfg_params[] = {
{ "samsung,pin-pud", PINCFG_TYPE_PUD },
{ "samsung,pin-drv", PINCFG_TYPE_DRV },
{ "samsung,pin-con-pdn", PINCFG_TYPE_CON_PDN },
{ "samsung,pin-pud-pdn", PINCFG_TYPE_PUD_PDN },
+ { "samsung,pin-val", PINCFG_TYPE_DAT },
};
/* Global list of devices (struct samsung_pinctrl_drv_data) */
@@ -59,163 +60,242 @@ static inline struct samsung_pin_bank *gc_to_pin_bank(struct gpio_chip *gc)
return container_of(gc, struct samsung_pin_bank, gpio_chip);
}
-/* check if the selector is a valid pin group selector */
static int samsung_get_group_count(struct pinctrl_dev *pctldev)
{
- struct samsung_pinctrl_drv_data *drvdata;
+ struct samsung_pinctrl_drv_data *pmx = pinctrl_dev_get_drvdata(pctldev);
- drvdata = pinctrl_dev_get_drvdata(pctldev);
- return drvdata->nr_groups;
+ return pmx->nr_groups;
}
-/* return the name of the group selected by the group selector */
static const char *samsung_get_group_name(struct pinctrl_dev *pctldev,
- unsigned selector)
+ unsigned group)
{
- struct samsung_pinctrl_drv_data *drvdata;
+ struct samsung_pinctrl_drv_data *pmx = pinctrl_dev_get_drvdata(pctldev);
- drvdata = pinctrl_dev_get_drvdata(pctldev);
- return drvdata->pin_groups[selector].name;
+ return pmx->pin_groups[group].name;
}
-/* return the pin numbers associated with the specified group */
static int samsung_get_group_pins(struct pinctrl_dev *pctldev,
- unsigned selector, const unsigned **pins, unsigned *num_pins)
+ unsigned group,
+ const unsigned **pins,
+ unsigned *num_pins)
{
- struct samsung_pinctrl_drv_data *drvdata;
+ struct samsung_pinctrl_drv_data *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pmx->pin_groups[group].pins;
+ *num_pins = pmx->pin_groups[group].num_pins;
- drvdata = pinctrl_dev_get_drvdata(pctldev);
- *pins = drvdata->pin_groups[selector].pins;
- *num_pins = drvdata->pin_groups[selector].num_pins;
return 0;
}
-/* create pinctrl_map entries by parsing device tree nodes */
-static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *np, struct pinctrl_map **maps,
- unsigned *nmaps)
+static int reserve_map(struct device *dev, struct pinctrl_map **map,
+ unsigned *reserved_maps, unsigned *num_maps,
+ unsigned reserve)
{
- struct device *dev = pctldev->dev;
- struct pinctrl_map *map;
- unsigned long *cfg = NULL;
- char *gname, *fname;
- int cfg_cnt = 0, map_cnt = 0, idx = 0;
-
- /* count the number of config options specfied in the node */
- for (idx = 0; idx < ARRAY_SIZE(pcfgs); idx++) {
- if (of_find_property(np, pcfgs[idx].prop_cfg, NULL))
- cfg_cnt++;
- }
+ unsigned old_num = *reserved_maps;
+ unsigned new_num = *num_maps + reserve;
+ struct pinctrl_map *new_map;
- /*
- * Find out the number of map entries to create. All the config options
- * can be accomadated into a single config map entry.
- */
- if (cfg_cnt)
- map_cnt = 1;
- if (of_find_property(np, "samsung,pin-function", NULL))
- map_cnt++;
- if (!map_cnt) {
- dev_err(dev, "node %s does not have either config or function "
- "configurations\n", np->name);
- return -EINVAL;
- }
+ if (old_num >= new_num)
+ return 0;
- /* Allocate memory for pin-map entries */
- map = kzalloc(sizeof(*map) * map_cnt, GFP_KERNEL);
- if (!map) {
- dev_err(dev, "could not alloc memory for pin-maps\n");
+ new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
+ if (!new_map) {
+ dev_err(dev, "krealloc(map) failed\n");
return -ENOMEM;
}
- *nmaps = 0;
- /*
- * Allocate memory for pin group name. The pin group name is derived
- * from the node name from which these map entries are be created.
- */
- gname = kzalloc(strlen(np->name) + GSUFFIX_LEN, GFP_KERNEL);
- if (!gname) {
- dev_err(dev, "failed to alloc memory for group name\n");
- goto free_map;
+ memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
+
+ *map = new_map;
+ *reserved_maps = new_num;
+
+ return 0;
+}
+
+static int add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps, const char *group,
+ const char *function)
+{
+ if (WARN_ON(*num_maps == *reserved_maps))
+ return -ENOSPC;
+
+ (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)[*num_maps].data.mux.group = group;
+ (*map)[*num_maps].data.mux.function = function;
+ (*num_maps)++;
+
+ return 0;
+}
+
+static int add_map_configs(struct device *dev, struct pinctrl_map **map,
+ unsigned *reserved_maps, unsigned *num_maps,
+ const char *group, unsigned long *configs,
+ unsigned num_configs)
+{
+ unsigned long *dup_configs;
+
+ if (WARN_ON(*num_maps == *reserved_maps))
+ return -ENOSPC;
+
+ dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
+ GFP_KERNEL);
+ if (!dup_configs) {
+ dev_err(dev, "kmemdup(configs) failed\n");
+ return -ENOMEM;
}
- sprintf(gname, "%s%s", np->name, GROUP_SUFFIX);
- /*
- * don't have config options? then skip over to creating function
- * map entries.
- */
- if (!cfg_cnt)
- goto skip_cfgs;
-
- /* Allocate memory for config entries */
- cfg = kzalloc(sizeof(*cfg) * cfg_cnt, GFP_KERNEL);
- if (!cfg) {
- dev_err(dev, "failed to alloc memory for configs\n");
- goto free_gname;
+ (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ (*map)[*num_maps].data.configs.group_or_pin = group;
+ (*map)[*num_maps].data.configs.configs = dup_configs;
+ (*map)[*num_maps].data.configs.num_configs = num_configs;
+ (*num_maps)++;
+
+ return 0;
+}
+
+static int add_config(struct device *dev, unsigned long **configs,
+ unsigned *num_configs, unsigned long config)
+{
+ unsigned old_num = *num_configs;
+ unsigned new_num = old_num + 1;
+ unsigned long *new_configs;
+
+ new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
+ GFP_KERNEL);
+ if (!new_configs) {
+ dev_err(dev, "krealloc(configs) failed\n");
+ return -ENOMEM;
}
- /* Prepare a list of config settings */
- for (idx = 0, cfg_cnt = 0; idx < ARRAY_SIZE(pcfgs); idx++) {
- u32 value;
- if (!of_property_read_u32(np, pcfgs[idx].prop_cfg, &value))
- cfg[cfg_cnt++] =
- PINCFG_PACK(pcfgs[idx].cfg_type, value);
+ new_configs[old_num] = config;
+
+ *configs = new_configs;
+ *num_configs = new_num;
+
+ return 0;
+}
+
+static void samsung_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map,
+ unsigned num_maps)
+{
+ int i;
+
+ for (i = 0; i < num_maps; i++)
+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
+ kfree(map[i].data.configs.configs);
+
+ kfree(map);
+}
+
+static int samsung_dt_subnode_to_map(struct samsung_pinctrl_drv_data *drvdata,
+ struct device *dev,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ unsigned *reserved_maps,
+ unsigned *num_maps)
+{
+ int ret, i;
+ u32 val;
+ unsigned long config;
+ unsigned long *configs = NULL;
+ unsigned num_configs = 0;
+ unsigned reserve;
+ struct property *prop;
+ const char *group;
+ bool has_func = false;
+
+ ret = of_property_read_u32(np, "samsung,pin-function", &val);
+ if (!ret)
+ has_func = true;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
+ ret = of_property_read_u32(np, cfg_params[i].property, &val);
+ if (!ret) {
+ config = PINCFG_PACK(cfg_params[i].param, val);
+ ret = add_config(dev, &configs, &num_configs, config);
+ if (ret < 0)
+ goto exit;
+ /* EINVAL=missing, which is fine since it's optional */
+ } else if (ret != -EINVAL) {
+ dev_err(dev, "could not parse property %s\n",
+ cfg_params[i].property);
+ }
}
- /* create the config map entry */
- map[*nmaps].data.configs.group_or_pin = gname;
- map[*nmaps].data.configs.configs = cfg;
- map[*nmaps].data.configs.num_configs = cfg_cnt;
- map[*nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
- *nmaps += 1;
-
-skip_cfgs:
- /* create the function map entry */
- if (of_find_property(np, "samsung,pin-function", NULL)) {
- fname = kzalloc(strlen(np->name) + FSUFFIX_LEN, GFP_KERNEL);
- if (!fname) {
- dev_err(dev, "failed to alloc memory for func name\n");
- goto free_cfg;
+ reserve = 0;
+ if (has_func)
+ reserve++;
+ if (num_configs)
+ reserve++;
+ ret = of_property_count_strings(np, "samsung,pins");
+ if (ret < 0) {
+ dev_err(dev, "could not parse property samsung,pins\n");
+ goto exit;
+ }
+ reserve *= ret;
+
+ ret = reserve_map(dev, map, reserved_maps, num_maps, reserve);
+ if (ret < 0)
+ goto exit;
+
+ of_property_for_each_string(np, "samsung,pins", prop, group) {
+ if (has_func) {
+ ret = add_map_mux(map, reserved_maps,
+ num_maps, group, np->full_name);
+ if (ret < 0)
+ goto exit;
}
- sprintf(fname, "%s%s", np->name, FUNCTION_SUFFIX);
- map[*nmaps].data.mux.group = gname;
- map[*nmaps].data.mux.function = fname;
- map[*nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
- *nmaps += 1;
+ if (num_configs) {
+ ret = add_map_configs(dev, map, reserved_maps,
+ num_maps, group, configs,
+ num_configs);
+ if (ret < 0)
+ goto exit;
+ }
}
- *maps = map;
- return 0;
+ ret = 0;
-free_cfg:
- kfree(cfg);
-free_gname:
- kfree(gname);
-free_map:
- kfree(map);
- return -ENOMEM;
+exit:
+ kfree(configs);
+ return ret;
}
-/* free the memory allocated to hold the pin-map table */
-static void samsung_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map, unsigned num_maps)
+static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
{
- int idx;
-
- for (idx = 0; idx < num_maps; idx++) {
- if (map[idx].type == PIN_MAP_TYPE_MUX_GROUP) {
- kfree(map[idx].data.mux.function);
- if (!idx)
- kfree(map[idx].data.mux.group);
- } else if (map->type == PIN_MAP_TYPE_CONFIGS_GROUP) {
- kfree(map[idx].data.configs.configs);
- if (!idx)
- kfree(map[idx].data.configs.group_or_pin);
+ struct samsung_pinctrl_drv_data *drvdata;
+ unsigned reserved_maps;
+ struct device_node *np;
+ int ret;
+
+ drvdata = pinctrl_dev_get_drvdata(pctldev);
+
+ reserved_maps = 0;
+ *map = NULL;
+ *num_maps = 0;
+
+ if (!of_get_child_count(np_config))
+ return samsung_dt_subnode_to_map(drvdata, pctldev->dev,
+ np_config, map,
+ &reserved_maps,
+ num_maps);
+
+ for_each_child_of_node(np_config, np) {
+ ret = samsung_dt_subnode_to_map(drvdata, pctldev->dev, np, map,
+ &reserved_maps, num_maps);
+ if (ret < 0) {
+ samsung_dt_free_map(pctldev, *map, *num_maps);
+ return ret;
}
- };
+ }
- kfree(map);
+ return 0;
}
/* list of pinctrl callbacks for the pinctrl core */
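
samsung_dt_node_to_map() above builds its pinctrl_map array with a reserve-then-fill scheme: reserve_map() grows the array once per subnode, then add_map_mux()/add_map_configs() append entries and bail out if the reservation was miscounted. The code below is a reduced user-space model of that scheme with simplified stand-in types, not the pinctrl core's structures.

#include <stdlib.h>
#include <string.h>

struct demo_map {
	const char *group;
	const char *function;
};

/* Grow the map array to hold 'extra' more entries beyond 'num'. */
int demo_reserve(struct demo_map **map, unsigned int *reserved,
		 unsigned int num, unsigned int extra)
{
	unsigned int want = num + extra;
	struct demo_map *tmp;

	if (*reserved >= want)
		return 0;

	tmp = realloc(*map, want * sizeof(*tmp));
	if (!tmp)
		return -1;

	/* Zero only the newly reserved tail, like the kernel helper does. */
	memset(tmp + *reserved, 0, (want - *reserved) * sizeof(*tmp));
	*map = tmp;
	*reserved = want;
	return 0;
}

/* Append one mux entry, refusing to write past the reservation. */
int demo_add_mux(struct demo_map *map, unsigned int reserved,
		 unsigned int *num, const char *group, const char *function)
{
	if (*num == reserved)
		return -1;	/* reservation was miscounted */

	map[*num].group = group;
	map[*num].function = function;
	(*num)++;
	return 0;
}
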
@@ -286,83 +366,21 @@ static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group, bool enable)
{
struct samsung_pinctrl_drv_data *drvdata;
- const unsigned int *pins;
- struct samsung_pin_bank *bank;
- void __iomem *reg;
- u32 mask, shift, data, pin_offset, cnt;
- unsigned long flags;
-
- drvdata = pinctrl_dev_get_drvdata(pctldev);
- pins = drvdata->pin_groups[group].pins;
-
- /*
- * for each pin in the pin group selected, program the correspoding pin
- * pin function number in the config register.
- */
- for (cnt = 0; cnt < drvdata->pin_groups[group].num_pins; cnt++) {
- struct samsung_pin_bank_type *type;
-
- pin_to_reg_bank(drvdata, pins[cnt] - drvdata->ctrl->base,
- &reg, &pin_offset, &bank);
- type = bank->type;
- mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1;
- shift = pin_offset * type->fld_width[PINCFG_TYPE_FUNC];
- if (shift >= 32) {
- /* Some banks have two config registers */
- shift -= 32;
- reg += 4;
- }
-
- spin_lock_irqsave(&bank->slock, flags);
-
- data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]);
- data &= ~(mask << shift);
- if (enable)
- data |= drvdata->pin_groups[group].func << shift;
- writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]);
-
- spin_unlock_irqrestore(&bank->slock, flags);
- }
-}
-
-/* enable a specified pinmux by writing to registers */
-static int samsung_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
-{
- samsung_pinmux_setup(pctldev, selector, group, true);
- return 0;
-}
-
-/* disable a specified pinmux by writing to registers */
-static void samsung_pinmux_disable(struct pinctrl_dev *pctldev,
- unsigned selector, unsigned group)
-{
- samsung_pinmux_setup(pctldev, selector, group, false);
-}
-
-/*
- * The calls to gpio_direction_output() and gpio_direction_input()
- * leads to this function call (via the pinctrl_gpio_direction_{input|output}()
- * function called from the gpiolib interface).
- */
-static int samsung_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range, unsigned offset, bool input)
-{
struct samsung_pin_bank_type *type;
struct samsung_pin_bank *bank;
- struct samsung_pinctrl_drv_data *drvdata;
void __iomem *reg;
- u32 data, pin_offset, mask, shift;
+ u32 mask, shift, data, pin_offset;
unsigned long flags;
+ const struct samsung_pmx_func *func;
+ const struct samsung_pin_group *grp;
- bank = gc_to_pin_bank(range->gc);
- type = bank->type;
drvdata = pinctrl_dev_get_drvdata(pctldev);
+ func = &drvdata->pmx_functions[selector];
+ grp = &drvdata->pin_groups[group];
- pin_offset = offset - bank->pin_base;
- reg = drvdata->virt_base + bank->pctl_offset +
- type->reg_offset[PINCFG_TYPE_FUNC];
-
+ pin_to_reg_bank(drvdata, grp->pins[0] - drvdata->ctrl->base,
+ &reg, &pin_offset, &bank);
+ type = bank->type;
mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1;
shift = pin_offset * type->fld_width[PINCFG_TYPE_FUNC];
if (shift >= 32) {
@@ -373,14 +391,20 @@ static int samsung_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
spin_lock_irqsave(&bank->slock, flags);
- data = readl(reg);
+ data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]);
data &= ~(mask << shift);
- if (!input)
- data |= FUNC_OUTPUT << shift;
- writel(data, reg);
+ if (enable)
+ data |= func->val << shift;
+ writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]);
spin_unlock_irqrestore(&bank->slock, flags);
+}
+/* enable a specified pinmux by writing to registers */
+static int samsung_pinmux_enable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ samsung_pinmux_setup(pctldev, selector, group, true);
return 0;
}
@@ -390,8 +414,6 @@ static const struct pinmux_ops samsung_pinmux_ops = {
.get_function_name = samsung_pinmux_get_fname,
.get_function_groups = samsung_pinmux_get_groups,
.enable = samsung_pinmux_enable,
- .disable = samsung_pinmux_disable,
- .gpio_set_direction = samsung_pinmux_gpio_set_direction,
};
/* set or get the pin config settings for a specified pin */
@@ -540,25 +562,59 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
}
/*
- * gpiolib gpio_direction_input callback function. The setting of the pin
- * mux function as 'gpio input' will be handled by the pinctrl susbsystem
- * interface.
+ * The calls to gpio_direction_output() and gpio_direction_input()
+ * leads to this function call.
*/
+static int samsung_gpio_set_direction(struct gpio_chip *gc,
+ unsigned offset, bool input)
+{
+ struct samsung_pin_bank_type *type;
+ struct samsung_pin_bank *bank;
+ struct samsung_pinctrl_drv_data *drvdata;
+ void __iomem *reg;
+ u32 data, mask, shift;
+ unsigned long flags;
+
+ bank = gc_to_pin_bank(gc);
+ type = bank->type;
+ drvdata = bank->drvdata;
+
+ reg = drvdata->virt_base + bank->pctl_offset +
+ type->reg_offset[PINCFG_TYPE_FUNC];
+
+ mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1;
+ shift = offset * type->fld_width[PINCFG_TYPE_FUNC];
+ if (shift >= 32) {
+ /* Some banks have two config registers */
+ shift -= 32;
+ reg += 4;
+ }
+
+ spin_lock_irqsave(&bank->slock, flags);
+
+ data = readl(reg);
+ data &= ~(mask << shift);
+ if (!input)
+ data |= FUNC_OUTPUT << shift;
+ writel(data, reg);
+
+ spin_unlock_irqrestore(&bank->slock, flags);
+
+ return 0;
+}
+
+/* gpiolib gpio_direction_input callback function. */
static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
- return pinctrl_gpio_direction_input(gc->base + offset);
+ return samsung_gpio_set_direction(gc, offset, true);
}
-/*
- * gpiolib gpio_direction_output callback function. The setting of the pin
- * mux function as 'gpio output' will be handled by the pinctrl susbsystem
- * interface.
- */
+/* gpiolib gpio_direction_output callback function. */
static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
int value)
{
samsung_gpio_set(gc, offset, value);
- return pinctrl_gpio_direction_output(gc->base + offset);
+ return samsung_gpio_set_direction(gc, offset, false);
}
/*
@@ -578,87 +634,115 @@ static int samsung_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return (virq) ? : -ENXIO;
}
-/*
- * Parse the pin names listed in the 'samsung,pins' property and convert it
- * into a list of gpio numbers are create a pin group from it.
- */
-static int samsung_pinctrl_parse_dt_pins(struct platform_device *pdev,
- struct device_node *cfg_np,
- struct pinctrl_desc *pctl,
- unsigned int **pin_list,
- unsigned int *npins)
+static struct samsung_pin_group *samsung_pinctrl_create_groups(
+ struct device *dev,
+ struct samsung_pinctrl_drv_data *drvdata,
+ unsigned int *cnt)
{
- struct device *dev = &pdev->dev;
- struct property *prop;
- struct pinctrl_pin_desc const *pdesc = pctl->pins;
- unsigned int idx = 0, cnt;
- const char *pin_name;
+ struct pinctrl_desc *ctrldesc = &drvdata->pctl;
+ struct samsung_pin_group *groups, *grp;
+ const struct pinctrl_pin_desc *pdesc;
+ int i;
- *npins = of_property_count_strings(cfg_np, "samsung,pins");
- if (IS_ERR_VALUE(*npins)) {
- dev_err(dev, "invalid pin list in %s node", cfg_np->name);
+ groups = devm_kzalloc(dev, ctrldesc->npins * sizeof(*groups),
+ GFP_KERNEL);
+ if (!groups)
+ return ERR_PTR(-EINVAL);
+ grp = groups;
+
+ pdesc = ctrldesc->pins;
+ for (i = 0; i < ctrldesc->npins; ++i, ++pdesc, ++grp) {
+ grp->name = pdesc->name;
+ grp->pins = &pdesc->number;
+ grp->num_pins = 1;
+ }
+
+ *cnt = ctrldesc->npins;
+ return groups;
+}
+
+static int samsung_pinctrl_create_function(struct device *dev,
+ struct samsung_pinctrl_drv_data *drvdata,
+ struct device_node *func_np,
+ struct samsung_pmx_func *func)
+{
+ int npins;
+ int ret;
+ int i;
+
+ if (of_property_read_u32(func_np, "samsung,pin-function", &func->val))
+ return 0;
+
+ npins = of_property_count_strings(func_np, "samsung,pins");
+ if (npins < 1) {
+ dev_err(dev, "invalid pin list in %s node", func_np->name);
return -EINVAL;
}
- *pin_list = devm_kzalloc(dev, *npins * sizeof(**pin_list), GFP_KERNEL);
- if (!*pin_list) {
- dev_err(dev, "failed to allocate memory for pin list\n");
+ func->name = func_np->full_name;
+
+ func->groups = devm_kzalloc(dev, npins * sizeof(char *), GFP_KERNEL);
+ if (!func->groups)
return -ENOMEM;
- }
- of_property_for_each_string(cfg_np, "samsung,pins", prop, pin_name) {
- for (cnt = 0; cnt < pctl->npins; cnt++) {
- if (pdesc[cnt].name) {
- if (!strcmp(pin_name, pdesc[cnt].name)) {
- (*pin_list)[idx++] = pdesc[cnt].number;
- break;
- }
- }
- }
- if (cnt == pctl->npins) {
- dev_err(dev, "pin %s not valid in %s node\n",
- pin_name, cfg_np->name);
- devm_kfree(dev, *pin_list);
- return -EINVAL;
+ for (i = 0; i < npins; ++i) {
+ const char *gname;
+
+ ret = of_property_read_string_index(func_np, "samsung,pins",
+ i, &gname);
+ if (ret) {
+ dev_err(dev,
+ "failed to read pin name %d from %s node\n",
+ i, func_np->name);
+ return ret;
}
+
+ func->groups[i] = gname;
}
- return 0;
+ func->num_groups = npins;
+ return 1;
}
-/*
- * Parse the information about all the available pin groups and pin functions
- * from device node of the pin-controller. A pin group is formed with all
- * the pins listed in the "samsung,pins" property.
- */
-static int samsung_pinctrl_parse_dt(struct platform_device *pdev,
- struct samsung_pinctrl_drv_data *drvdata)
+static struct samsung_pmx_func *samsung_pinctrl_create_functions(
+ struct device *dev,
+ struct samsung_pinctrl_drv_data *drvdata,
+ unsigned int *cnt)
{
- struct device *dev = &pdev->dev;
+ struct samsung_pmx_func *functions, *func;
struct device_node *dev_np = dev->of_node;
struct device_node *cfg_np;
- struct samsung_pin_group *groups, *grp;
- struct samsung_pmx_func *functions, *func;
- unsigned *pin_list;
- unsigned int npins, grp_cnt, func_idx = 0;
- char *gname, *fname;
+ unsigned int func_cnt = 0;
int ret;
- grp_cnt = of_get_child_count(dev_np);
- if (!grp_cnt)
- return -EINVAL;
+ /*
+ * Iterate over all the child nodes of the pin controller node
+ * and create pin groups and pin function lists.
+ */
+ for_each_child_of_node(dev_np, cfg_np) {
+ struct device_node *func_np;
- groups = devm_kzalloc(dev, grp_cnt * sizeof(*groups), GFP_KERNEL);
- if (!groups) {
- dev_err(dev, "failed allocate memory for ping group list\n");
- return -EINVAL;
+ if (!of_get_child_count(cfg_np)) {
+ if (!of_find_property(cfg_np,
+ "samsung,pin-function", NULL))
+ continue;
+ ++func_cnt;
+ continue;
+ }
+
+ for_each_child_of_node(cfg_np, func_np) {
+ if (!of_find_property(func_np,
+ "samsung,pin-function", NULL))
+ continue;
+ ++func_cnt;
+ }
}
- grp = groups;
- functions = devm_kzalloc(dev, grp_cnt * sizeof(*functions), GFP_KERNEL);
+ functions = devm_kzalloc(dev, func_cnt * sizeof(*functions),
+ GFP_KERNEL);
if (!functions) {
dev_err(dev, "failed to allocate memory for function list\n");
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
func = functions;
@@ -666,61 +750,68 @@ static int samsung_pinctrl_parse_dt(struct platform_device *pdev,
* Iterate over all the child nodes of the pin controller node
* and create pin groups and pin function lists.
*/
+ func_cnt = 0;
for_each_child_of_node(dev_np, cfg_np) {
- u32 function;
- if (!of_find_property(cfg_np, "samsung,pins", NULL))
+ struct device_node *func_np;
+
+ if (!of_get_child_count(cfg_np)) {
+ ret = samsung_pinctrl_create_function(dev, drvdata,
+ cfg_np, func);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ if (ret > 0) {
+ ++func;
+ ++func_cnt;
+ }
continue;
+ }
- ret = samsung_pinctrl_parse_dt_pins(pdev, cfg_np,
- &drvdata->pctl, &pin_list, &npins);
- if (ret)
- return ret;
-
- /* derive pin group name from the node name */
- gname = devm_kzalloc(dev, strlen(cfg_np->name) + GSUFFIX_LEN,
- GFP_KERNEL);
- if (!gname) {
- dev_err(dev, "failed to alloc memory for group name\n");
- return -ENOMEM;
+ for_each_child_of_node(cfg_np, func_np) {
+ ret = samsung_pinctrl_create_function(dev, drvdata,
+ func_np, func);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ if (ret > 0) {
+ ++func;
+ ++func_cnt;
+ }
}
- sprintf(gname, "%s%s", cfg_np->name, GROUP_SUFFIX);
+ }
- grp->name = gname;
- grp->pins = pin_list;
- grp->num_pins = npins;
- of_property_read_u32(cfg_np, "samsung,pin-function", &function);
- grp->func = function;
- grp++;
+ *cnt = func_cnt;
+ return functions;
+}
- if (!of_find_property(cfg_np, "samsung,pin-function", NULL))
- continue;
+/*
+ * Parse the information about all the available pin groups and pin functions
+ * from device node of the pin-controller. A pin group is formed with all
+ * the pins listed in the "samsung,pins" property.
+ */
- /* derive function name from the node name */
- fname = devm_kzalloc(dev, strlen(cfg_np->name) + FSUFFIX_LEN,
- GFP_KERNEL);
- if (!fname) {
- dev_err(dev, "failed to alloc memory for func name\n");
- return -ENOMEM;
- }
- sprintf(fname, "%s%s", cfg_np->name, FUNCTION_SUFFIX);
-
- func->name = fname;
- func->groups = devm_kzalloc(dev, sizeof(char *), GFP_KERNEL);
- if (!func->groups) {
- dev_err(dev, "failed to alloc memory for group list "
- "in pin function");
- return -ENOMEM;
- }
- func->groups[0] = gname;
- func->num_groups = 1;
- func++;
- func_idx++;
+static int samsung_pinctrl_parse_dt(struct platform_device *pdev,
+ struct samsung_pinctrl_drv_data *drvdata)
+{
+ struct device *dev = &pdev->dev;
+ struct samsung_pin_group *groups;
+ struct samsung_pmx_func *functions;
+ unsigned int grp_cnt = 0, func_cnt = 0;
+
+ groups = samsung_pinctrl_create_groups(dev, drvdata, &grp_cnt);
+ if (IS_ERR(groups)) {
+ dev_err(dev, "failed to parse pin groups\n");
+ return PTR_ERR(groups);
+ }
+
+ functions = samsung_pinctrl_create_functions(dev, drvdata, &func_cnt);
+ if (IS_ERR(functions)) {
+ dev_err(dev, "failed to parse pin functions\n");
+ return PTR_ERR(functions);
}
drvdata->pin_groups = groups;
drvdata->nr_groups = grp_cnt;
drvdata->pmx_functions = functions;
- drvdata->nr_functions = func_idx;
+ drvdata->nr_functions = func_cnt;
return 0;
}
@@ -790,7 +881,8 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
pin_bank = &drvdata->ctrl->pin_banks[bank];
pin_bank->grange.name = pin_bank->name;
pin_bank->grange.id = bank;
- pin_bank->grange.pin_base = pin_bank->pin_base;
+ pin_bank->grange.pin_base = drvdata->ctrl->base
+ + pin_bank->pin_base;
pin_bank->grange.base = pin_bank->gpio_chip.base;
pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
pin_bank->grange.gc = &pin_bank->gpio_chip;
@@ -800,7 +892,19 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
return 0;
}
+static int samsung_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void samsung_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
static const struct gpio_chip samsung_gpiolib_chip = {
+ .request = samsung_gpio_request,
+ .free = samsung_gpio_free,
.set = samsung_gpio_set,
.get = samsung_gpio_get,
.direction_input = samsung_gpio_direction_input,
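
For reference, the delegation pattern behind the new .request/.free callbacks, stripped of the Samsung specifics, looks roughly like the following sketch (not part of the patch; the example_* names are invented for illustration, while pinctrl_request_gpio() and pinctrl_free_gpio() are the stock pinctrl consumer helpers used by the callbacks above):

#include <linux/gpio/driver.h>
#include <linux/pinctrl/consumer.h>

/* Sketch: a gpio_chip that defers pin ownership to the pin controller. */
static int example_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	/* Tell the pin controller this pin is now used as a GPIO. */
	return pinctrl_request_gpio(chip->base + offset);
}

static void example_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	/* Hand the pin back to the pin controller. */
	pinctrl_free_gpio(chip->base + offset);
}

static const struct gpio_chip example_gpio_chip = {
	.label		= "example",
	.request	= example_gpio_request,
	.free		= example_gpio_free,
};

With this in place every gpiolib request shows up as a pinmux ownership claim, so a line cannot be driven as a GPIO while a pin function still owns it.
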
diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index b3e41fa5798b..2b882320e8e9 100644
--- a/drivers/pinctrl/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -156,13 +156,6 @@ struct samsung_pin_bank {
* @nr_banks: number of pin banks.
* @base: starting system wide pin number.
* @nr_pins: number of pins supported by the controller.
- * @geint_con: offset of the ext-gpio controller registers.
- * @geint_mask: offset of the ext-gpio interrupt mask registers.
- * @geint_pend: offset of the ext-gpio interrupt pending registers.
- * @weint_con: offset of the ext-wakeup controller registers.
- * @weint_mask: offset of the ext-wakeup interrupt mask registers.
- * @weint_pend: offset of the ext-wakeup interrupt pending registers.
- * @svc: offset of the interrupt service register.
* @eint_gpio_init: platform specific callback to setup the external gpio
* interrupts for the controller.
* @eint_wkup_init: platform specific callback to setup the external wakeup
@@ -176,16 +169,6 @@ struct samsung_pin_ctrl {
u32 base;
u32 nr_pins;
- u32 geint_con;
- u32 geint_mask;
- u32 geint_pend;
-
- u32 weint_con;
- u32 weint_mask;
- u32 weint_pend;
-
- u32 svc;
-
int (*eint_gpio_init)(struct samsung_pinctrl_drv_data *);
int (*eint_wkup_init)(struct samsung_pinctrl_drv_data *);
void (*suspend)(struct samsung_pinctrl_drv_data *);
@@ -248,6 +231,7 @@ struct samsung_pmx_func {
const char *name;
const char **groups;
u8 num_groups;
+ u32 val;
};
/* list of all exported SoC specific data */
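
The new per-function 'val' member holds the value read from the "samsung,pin-function" property, so the mux-enable path no longer has to carry it per group. A purely hypothetical, simplified sketch of how such a path could program it (the example_* name and parameters are invented for illustration and are not the driver's actual code):

#include <linux/io.h>
#include <linux/bitops.h>

/* Hypothetical: read-modify-write one pin's function field with func->val. */
static void example_set_pin_function(void __iomem *reg, unsigned int pin_offset,
				     unsigned int field_width,
				     const struct samsung_pmx_func *func)
{
	u32 shift = pin_offset * field_width;
	u32 mask = (BIT(field_width) - 1) << shift;
	u32 con = readl(reg);

	con &= ~mask;
	con |= func->val << shift;	/* value from "samsung,pin-function" */
	writel(con, reg);
}
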
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index a9288ab01f7b..80f641ee4dea 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -409,11 +409,8 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
int sh_pfc_unregister_gpiochip(struct sh_pfc *pfc)
{
- int err;
- int ret;
-
- ret = gpiochip_remove(&pfc->gpio->gpio_chip);
- err = gpiochip_remove(&pfc->func->gpio_chip);
+ gpiochip_remove(&pfc->gpio->gpio_chip);
+ gpiochip_remove(&pfc->func->gpio_chip);
- return ret < 0 ? ret : err;
+ return 0;
}
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 2e688dc4a3c8..576d41b459e9 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -1726,6 +1726,133 @@ static const unsigned int audio_clkout_mux[] = {
AUDIO_CLKOUT_MARK,
};
+/* - CAN -------------------------------------------------------------------- */
+
+static const unsigned int can0_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 29),
+};
+
+static const unsigned int can0_data_mux[] = {
+ CAN0_TX_MARK, CAN0_RX_MARK,
+};
+
+static const unsigned int can0_data_b_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(7, 4), RCAR_GP_PIN(7, 3),
+};
+
+static const unsigned int can0_data_b_mux[] = {
+ CAN0_TX_B_MARK, CAN0_RX_B_MARK,
+};
+
+static const unsigned int can0_data_c_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(5, 17), RCAR_GP_PIN(5, 18),
+};
+
+static const unsigned int can0_data_c_mux[] = {
+ CAN0_TX_C_MARK, CAN0_RX_C_MARK,
+};
+
+static const unsigned int can0_data_d_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(2, 26), RCAR_GP_PIN(2, 27),
+};
+
+static const unsigned int can0_data_d_mux[] = {
+ CAN0_TX_D_MARK, CAN0_RX_D_MARK,
+};
+
+static const unsigned int can0_data_e_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(4, 18), RCAR_GP_PIN(4, 28),
+};
+
+static const unsigned int can0_data_e_mux[] = {
+ CAN0_TX_E_MARK, CAN0_RX_E_MARK,
+};
+
+static const unsigned int can0_data_f_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(6, 7), RCAR_GP_PIN(6, 6),
+};
+
+static const unsigned int can0_data_f_mux[] = {
+ CAN0_TX_F_MARK, CAN0_RX_F_MARK,
+};
+
+static const unsigned int can1_data_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(3, 21), RCAR_GP_PIN(3, 20),
+};
+
+static const unsigned int can1_data_mux[] = {
+ CAN1_TX_MARK, CAN1_RX_MARK,
+};
+
+static const unsigned int can1_data_b_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(7, 8), RCAR_GP_PIN(7, 9),
+};
+
+static const unsigned int can1_data_b_mux[] = {
+ CAN1_TX_B_MARK, CAN1_RX_B_MARK,
+};
+
+static const unsigned int can1_data_c_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 19),
+};
+
+static const unsigned int can1_data_c_mux[] = {
+ CAN1_TX_C_MARK, CAN1_RX_C_MARK,
+};
+
+static const unsigned int can1_data_d_pins[] = {
+ /* TX, RX */
+ RCAR_GP_PIN(4, 29), RCAR_GP_PIN(4, 31),
+};
+
+static const unsigned int can1_data_d_mux[] = {
+ CAN1_TX_D_MARK, CAN1_RX_D_MARK,
+};
+
+static const unsigned int can_clk_pins[] = {
+ /* CLK */
+ RCAR_GP_PIN(7, 2),
+};
+
+static const unsigned int can_clk_mux[] = {
+ CAN_CLK_MARK,
+};
+
+static const unsigned int can_clk_b_pins[] = {
+ /* CLK */
+ RCAR_GP_PIN(5, 21),
+};
+
+static const unsigned int can_clk_b_mux[] = {
+ CAN_CLK_B_MARK,
+};
+
+static const unsigned int can_clk_c_pins[] = {
+ /* CLK */
+ RCAR_GP_PIN(4, 30),
+};
+
+static const unsigned int can_clk_c_mux[] = {
+ CAN_CLK_C_MARK,
+};
+
+static const unsigned int can_clk_d_pins[] = {
+ /* CLK */
+ RCAR_GP_PIN(7, 19),
+};
+
+static const unsigned int can_clk_d_mux[] = {
+ CAN_CLK_D_MARK,
+};
/* - DU --------------------------------------------------------------------- */
static const unsigned int du_rgb666_pins[] = {
@@ -1867,6 +1994,192 @@ static const unsigned int eth_rmii_mux[] = {
ETH_RXD0_MARK, ETH_RXD1_MARK, ETH_RX_ER_MARK, ETH_CRS_DV_MARK,
ETH_TXD0_MARK, ETH_TXD1_MARK, ETH_TX_EN_MARK, ETH_REFCLK_MARK,
};
+
+/* - HSCIF0 ----------------------------------------------------------------- */
+static const unsigned int hscif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(7, 3), RCAR_GP_PIN(7, 4),
+};
+static const unsigned int hscif0_data_mux[] = {
+ HRX0_MARK, HTX0_MARK,
+};
+static const unsigned int hscif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(7, 2),
+};
+static const unsigned int hscif0_clk_mux[] = {
+ HSCK0_MARK,
+};
+static const unsigned int hscif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(7, 1), RCAR_GP_PIN(7, 0),
+};
+static const unsigned int hscif0_ctrl_mux[] = {
+ HRTS0_N_MARK, HCTS0_N_MARK,
+};
+static const unsigned int hscif0_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int hscif0_data_b_mux[] = {
+ HRX0_B_MARK, HTX0_B_MARK,
+};
+static const unsigned int hscif0_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 13),
+};
+static const unsigned int hscif0_ctrl_b_mux[] = {
+ HRTS0_N_B_MARK, HCTS0_N_B_MARK,
+};
+static const unsigned int hscif0_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int hscif0_data_c_mux[] = {
+ HRX0_C_MARK, HTX0_C_MARK,
+};
+static const unsigned int hscif0_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 31),
+};
+static const unsigned int hscif0_clk_c_mux[] = {
+ HSCK0_C_MARK,
+};
+/* - HSCIF1 ----------------------------------------------------------------- */
+static const unsigned int hscif1_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(7, 5), RCAR_GP_PIN(7, 6),
+};
+static const unsigned int hscif1_data_mux[] = {
+ HRX1_MARK, HTX1_MARK,
+};
+static const unsigned int hscif1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(7, 7),
+};
+static const unsigned int hscif1_clk_mux[] = {
+ HSCK1_MARK,
+};
+static const unsigned int hscif1_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(7, 9), RCAR_GP_PIN(7, 8),
+};
+static const unsigned int hscif1_ctrl_mux[] = {
+ HRTS1_N_MARK, HCTS1_N_MARK,
+};
+static const unsigned int hscif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 17), RCAR_GP_PIN(1, 18),
+};
+static const unsigned int hscif1_data_b_mux[] = {
+ HRX1_B_MARK, HTX1_B_MARK,
+};
+static const unsigned int hscif1_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(7, 14), RCAR_GP_PIN(7, 15),
+};
+static const unsigned int hscif1_data_c_mux[] = {
+ HRX1_C_MARK, HTX1_C_MARK,
+};
+static const unsigned int hscif1_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(7, 16),
+};
+static const unsigned int hscif1_clk_c_mux[] = {
+ HSCK1_C_MARK,
+};
+static const unsigned int hscif1_ctrl_c_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(7, 18), RCAR_GP_PIN(7, 17),
+};
+static const unsigned int hscif1_ctrl_c_mux[] = {
+ HRTS1_N_C_MARK, HCTS1_N_C_MARK,
+};
+static const unsigned int hscif1_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 18),
+};
+static const unsigned int hscif1_data_d_mux[] = {
+ HRX1_D_MARK, HTX1_D_MARK,
+};
+static const unsigned int hscif1_data_e_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(7, 14), RCAR_GP_PIN(7, 15),
+};
+static const unsigned int hscif1_data_e_mux[] = {
+ HRX1_C_MARK, HTX1_C_MARK,
+};
+static const unsigned int hscif1_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 6),
+};
+static const unsigned int hscif1_clk_e_mux[] = {
+ HSCK1_E_MARK,
+};
+static const unsigned int hscif1_ctrl_e_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(2, 8), RCAR_GP_PIN(2, 7),
+};
+static const unsigned int hscif1_ctrl_e_mux[] = {
+ HRTS1_N_E_MARK, HCTS1_N_E_MARK,
+};
+/* - HSCIF2 ----------------------------------------------------------------- */
+static const unsigned int hscif2_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 16), RCAR_GP_PIN(4, 17),
+};
+static const unsigned int hscif2_data_mux[] = {
+ HRX2_MARK, HTX2_MARK,
+};
+static const unsigned int hscif2_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 15),
+};
+static const unsigned int hscif2_clk_mux[] = {
+ HSCK2_MARK,
+};
+static const unsigned int hscif2_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(4, 14), RCAR_GP_PIN(4, 13),
+};
+static const unsigned int hscif2_ctrl_mux[] = {
+ HRTS2_N_MARK, HCTS2_N_MARK,
+};
+static const unsigned int hscif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 20), RCAR_GP_PIN(1, 22),
+};
+static const unsigned int hscif2_data_b_mux[] = {
+ HRX2_B_MARK, HTX2_B_MARK,
+};
+static const unsigned int hscif2_ctrl_b_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(1, 23), RCAR_GP_PIN(1, 21),
+};
+static const unsigned int hscif2_ctrl_b_mux[] = {
+ HRTS2_N_B_MARK, HCTS2_N_B_MARK,
+};
+static const unsigned int hscif2_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+};
+static const unsigned int hscif2_data_c_mux[] = {
+ HRX2_C_MARK, HTX2_C_MARK,
+};
+static const unsigned int hscif2_clk_c_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 31),
+};
+static const unsigned int hscif2_clk_c_mux[] = {
+ HSCK2_C_MARK,
+};
+static const unsigned int hscif2_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(1, 20), RCAR_GP_PIN(5, 31),
+};
+static const unsigned int hscif2_data_d_mux[] = {
+ HRX2_B_MARK, HTX2_D_MARK,
+};
/* - I2C0 ------------------------------------------------------------------- */
static const unsigned int i2c0_pins[] = {
/* SCL, SDA */
@@ -3869,6 +4182,20 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(audio_clk_b_b),
SH_PFC_PIN_GROUP(audio_clk_c),
SH_PFC_PIN_GROUP(audio_clkout),
+ SH_PFC_PIN_GROUP(can0_data),
+ SH_PFC_PIN_GROUP(can0_data_b),
+ SH_PFC_PIN_GROUP(can0_data_c),
+ SH_PFC_PIN_GROUP(can0_data_d),
+ SH_PFC_PIN_GROUP(can0_data_e),
+ SH_PFC_PIN_GROUP(can0_data_f),
+ SH_PFC_PIN_GROUP(can1_data),
+ SH_PFC_PIN_GROUP(can1_data_b),
+ SH_PFC_PIN_GROUP(can1_data_c),
+ SH_PFC_PIN_GROUP(can1_data_d),
+ SH_PFC_PIN_GROUP(can_clk),
+ SH_PFC_PIN_GROUP(can_clk_b),
+ SH_PFC_PIN_GROUP(can_clk_c),
+ SH_PFC_PIN_GROUP(can_clk_d),
SH_PFC_PIN_GROUP(du_rgb666),
SH_PFC_PIN_GROUP(du_rgb888),
SH_PFC_PIN_GROUP(du_clk_out_0),
@@ -3885,6 +4212,32 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(eth_magic),
SH_PFC_PIN_GROUP(eth_mdio),
SH_PFC_PIN_GROUP(eth_rmii),
+ SH_PFC_PIN_GROUP(hscif0_data),
+ SH_PFC_PIN_GROUP(hscif0_clk),
+ SH_PFC_PIN_GROUP(hscif0_ctrl),
+ SH_PFC_PIN_GROUP(hscif0_data_b),
+ SH_PFC_PIN_GROUP(hscif0_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif0_data_c),
+ SH_PFC_PIN_GROUP(hscif0_clk_c),
+ SH_PFC_PIN_GROUP(hscif1_data),
+ SH_PFC_PIN_GROUP(hscif1_clk),
+ SH_PFC_PIN_GROUP(hscif1_ctrl),
+ SH_PFC_PIN_GROUP(hscif1_data_b),
+ SH_PFC_PIN_GROUP(hscif1_data_c),
+ SH_PFC_PIN_GROUP(hscif1_clk_c),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_c),
+ SH_PFC_PIN_GROUP(hscif1_data_d),
+ SH_PFC_PIN_GROUP(hscif1_data_e),
+ SH_PFC_PIN_GROUP(hscif1_clk_e),
+ SH_PFC_PIN_GROUP(hscif1_ctrl_e),
+ SH_PFC_PIN_GROUP(hscif2_data),
+ SH_PFC_PIN_GROUP(hscif2_clk),
+ SH_PFC_PIN_GROUP(hscif2_ctrl),
+ SH_PFC_PIN_GROUP(hscif2_data_b),
+ SH_PFC_PIN_GROUP(hscif2_ctrl_b),
+ SH_PFC_PIN_GROUP(hscif2_data_c),
+ SH_PFC_PIN_GROUP(hscif2_clk_c),
+ SH_PFC_PIN_GROUP(hscif2_data_d),
SH_PFC_PIN_GROUP(i2c0),
SH_PFC_PIN_GROUP(i2c0_b),
SH_PFC_PIN_GROUP(i2c0_c),
@@ -4155,6 +4508,30 @@ static const char * const audio_clk_groups[] = {
"audio_clkout",
};
+static const char * const can0_groups[] = {
+ "can0_data_a",
+ "can0_data_b",
+ "can0_data_c",
+ "can0_data_d",
+ "can0_data_e",
+ "can0_data_f",
+ "can_clk_a",
+ "can_clk_b",
+ "can_clk_c",
+ "can_clk_d",
+};
+
+static const char * const can1_groups[] = {
+ "can1_data_a",
+ "can1_data_b",
+ "can1_data_c",
+ "can1_data_d",
+ "can_clk_a",
+ "can_clk_b",
+ "can_clk_c",
+ "can_clk_d",
+};
+
static const char * const du_groups[] = {
"du_rgb666",
"du_rgb888",
@@ -4183,6 +4560,41 @@ static const char * const eth_groups[] = {
"eth_rmii",
};
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+ "hscif0_data_b",
+ "hscif0_ctrl_b",
+ "hscif0_data_c",
+ "hscif0_clk_c",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data",
+ "hscif1_clk",
+ "hscif1_ctrl",
+ "hscif1_data_b",
+ "hscif1_data_c",
+ "hscif1_clk_c",
+ "hscif1_ctrl_c",
+ "hscif1_data_d",
+ "hscif1_data_e",
+ "hscif1_clk_e",
+ "hscif1_ctrl_e",
+};
+
+static const char * const hscif2_groups[] = {
+ "hscif2_data",
+ "hscif2_clk",
+ "hscif2_ctrl",
+ "hscif2_data_b",
+ "hscif2_ctrl_b",
+ "hscif2_data_c",
+ "hscif2_clk_c",
+ "hscif2_data_d",
+};
+
static const char * const i2c0_groups[] = {
"i2c0",
"i2c0_b",
@@ -4543,10 +4955,15 @@ static const char * const vin2_groups[] = {
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
+ SH_PFC_FUNCTION(can0),
+ SH_PFC_FUNCTION(can1),
SH_PFC_FUNCTION(du),
SH_PFC_FUNCTION(du0),
SH_PFC_FUNCTION(du1),
SH_PFC_FUNCTION(eth),
+ SH_PFC_FUNCTION(hscif0),
+ SH_PFC_FUNCTION(hscif1),
+ SH_PFC_FUNCTION(hscif2),
SH_PFC_FUNCTION(i2c0),
SH_PFC_FUNCTION(i2c1),
SH_PFC_FUNCTION(i2c2),
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index ee370de4609a..0bd8f4401b42 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -3842,7 +3842,8 @@ static int sh73a0_pinmux_soc_init(struct sh_pfc *pfc)
cfg.init_data = &sh73a0_vccq_mc0_init_data;
cfg.driver_data = pfc;
- data->vccq_mc0 = regulator_register(&sh73a0_vccq_mc0_desc, &cfg);
+ data->vccq_mc0 = devm_regulator_register(pfc->dev,
+ &sh73a0_vccq_mc0_desc, &cfg);
if (IS_ERR(data->vccq_mc0)) {
ret = PTR_ERR(data->vccq_mc0);
dev_err(pfc->dev, "Failed to register VCCQ MC0 regulator: %d\n",
@@ -3855,16 +3856,8 @@ static int sh73a0_pinmux_soc_init(struct sh_pfc *pfc)
return 0;
}
-static void sh73a0_pinmux_soc_exit(struct sh_pfc *pfc)
-{
- struct sh73a0_pinmux_data *data = pfc->soc_data;
-
- regulator_unregister(data->vccq_mc0);
-}
-
static const struct sh_pfc_soc_operations sh73a0_pinmux_ops = {
.init = sh73a0_pinmux_soc_init,
- .exit = sh73a0_pinmux_soc_exit,
.get_bias = sh73a0_pinmux_get_bias,
.set_bias = sh73a0_pinmux_set_bias,
};
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index e758af95c209..11db3ee39d40 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -345,27 +345,6 @@ done:
return ret;
}
-static void sh_pfc_func_disable(struct pinctrl_dev *pctldev, unsigned selector,
- unsigned group)
-{
- struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
- struct sh_pfc *pfc = pmx->pfc;
- const struct sh_pfc_pin_group *grp = &pfc->info->groups[group];
- unsigned long flags;
- unsigned int i;
-
- spin_lock_irqsave(&pfc->lock, flags);
-
- for (i = 0; i < grp->nr_pins; ++i) {
- int idx = sh_pfc_get_pin_index(pfc, grp->pins[i]);
- struct sh_pfc_pin_config *cfg = &pmx->configs[idx];
-
- cfg->type = PINMUX_TYPE_NONE;
- }
-
- spin_unlock_irqrestore(&pfc->lock, flags);
-}
-
static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset)
@@ -464,7 +443,6 @@ static const struct pinmux_ops sh_pfc_pinmux_ops = {
.get_function_name = sh_pfc_get_function_name,
.get_function_groups = sh_pfc_get_function_groups,
.enable = sh_pfc_func_enable,
- .disable = sh_pfc_func_disable,
.gpio_request_enable = sh_pfc_gpio_request_enable,
.gpio_disable_free = sh_pfc_gpio_disable_free,
.gpio_set_direction = sh_pfc_gpio_set_direction,
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 014f5b1fee55..4c1d7c68666d 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -186,15 +186,6 @@ static int sirfsoc_pinmux_enable(struct pinctrl_dev *pmxdev, unsigned selector,
return 0;
}
-static void sirfsoc_pinmux_disable(struct pinctrl_dev *pmxdev, unsigned selector,
- unsigned group)
-{
- struct sirfsoc_pmx *spmx;
-
- spmx = pinctrl_dev_get_drvdata(pmxdev);
- sirfsoc_pinmux_endisable(spmx, selector, false);
-}
-
static int sirfsoc_pinmux_get_funcs_count(struct pinctrl_dev *pmxdev)
{
return sirfsoc_pmxfunc_cnt;
@@ -240,7 +231,6 @@ static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
static struct pinmux_ops sirfsoc_pinmux_ops = {
.enable = sirfsoc_pinmux_enable,
- .disable = sirfsoc_pinmux_disable,
.get_functions_count = sirfsoc_pinmux_get_funcs_count,
.get_function_name = sirfsoc_pinmux_get_func_name,
.get_function_groups = sirfsoc_pinmux_get_groups,
diff --git a/drivers/pinctrl/spear/Kconfig b/drivers/pinctrl/spear/Kconfig
index 04d93e602674..9ef18eb958e1 100644
--- a/drivers/pinctrl/spear/Kconfig
+++ b/drivers/pinctrl/spear/Kconfig
@@ -48,6 +48,7 @@ config PINCTRL_SPEAR1340
config PINCTRL_SPEAR_PLGPIO
bool "SPEAr SoC PLGPIO Controller"
depends on GPIOLIB && PINCTRL_SPEAR
+ select GPIOLIB_IRQCHIP
help
Say yes here to support PLGPIO controller on ST Microelectronics SPEAr
SoCs.
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index ff2940e9f2a7..bddb79105d67 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -11,12 +11,11 @@
#include <linux/clk.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
@@ -54,7 +53,6 @@ struct plgpio_regs {
*
* lock: lock for guarding gpio registers
* base: base address of plgpio block
- * irq_base: irq number of plgpio0
* chip: gpio framework specific chip information structure
* p2o: function ptr for pin to offset conversion. This is required only for
* machines where mapping b/w pin and offset is not 1-to-1.
@@ -68,8 +66,6 @@ struct plgpio {
spinlock_t lock;
void __iomem *base;
struct clk *clk;
- unsigned irq_base;
- struct irq_domain *irq_domain;
struct gpio_chip chip;
int (*p2o)(int pin); /* pin_to_offset */
int (*o2p)(int offset); /* offset_to_pin */
@@ -280,21 +276,12 @@ disable_clk:
pinctrl_free_gpio(gpio);
}
-static int plgpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct plgpio *plgpio = container_of(chip, struct plgpio, chip);
-
- if (IS_ERR_VALUE(plgpio->irq_base))
- return -EINVAL;
-
- return irq_find_mapping(plgpio->irq_domain, offset);
-}
-
/* PLGPIO IRQ */
static void plgpio_irq_disable(struct irq_data *d)
{
- struct plgpio *plgpio = irq_data_get_irq_chip_data(d);
- int offset = d->irq - plgpio->irq_base;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct plgpio *plgpio = container_of(gc, struct plgpio, chip);
+ int offset = d->hwirq;
unsigned long flags;
/* get correct offset for "offset" pin */
@@ -311,8 +298,9 @@ static void plgpio_irq_disable(struct irq_data *d)
static void plgpio_irq_enable(struct irq_data *d)
{
- struct plgpio *plgpio = irq_data_get_irq_chip_data(d);
- int offset = d->irq - plgpio->irq_base;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct plgpio *plgpio = container_of(gc, struct plgpio, chip);
+ int offset = d->hwirq;
unsigned long flags;
/* get correct offset for "offset" pin */
@@ -329,8 +317,9 @@ static void plgpio_irq_enable(struct irq_data *d)
static int plgpio_irq_set_type(struct irq_data *d, unsigned trigger)
{
- struct plgpio *plgpio = irq_data_get_irq_chip_data(d);
- int offset = d->irq - plgpio->irq_base;
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct plgpio *plgpio = container_of(gc, struct plgpio, chip);
+ int offset = d->hwirq;
void __iomem *reg_off;
unsigned int supported_type = 0, val;
@@ -369,7 +358,8 @@ static struct irq_chip plgpio_irqchip = {
static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct plgpio *plgpio = irq_get_handler_data(irq);
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+ struct plgpio *plgpio = container_of(gc, struct plgpio, chip);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
int regs_count, count, pin, offset, i = 0;
unsigned long pending;
@@ -410,7 +400,8 @@ static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc)
/* get correct irq line number */
pin = i * MAX_GPIO_PER_REG + pin;
- generic_handle_irq(plgpio_to_irq(&plgpio->chip, pin));
+ generic_handle_irq(
+ irq_find_mapping(gc->irqdomain, pin));
}
}
chained_irq_exit(irqchip, desc);
@@ -523,10 +514,9 @@ end:
}
static int plgpio_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct plgpio *plgpio;
struct resource *res;
- int ret, irq, i;
+ int ret, irq;
plgpio = devm_kzalloc(&pdev->dev, sizeof(*plgpio), GFP_KERNEL);
if (!plgpio) {
@@ -563,7 +553,6 @@ static int plgpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, plgpio);
spin_lock_init(&plgpio->lock);
- plgpio->irq_base = -1;
plgpio->chip.base = -1;
plgpio->chip.request = plgpio_request;
plgpio->chip.free = plgpio_free;
@@ -571,10 +560,10 @@ static int plgpio_probe(struct platform_device *pdev)
plgpio->chip.direction_output = plgpio_direction_output;
plgpio->chip.get = plgpio_get_value;
plgpio->chip.set = plgpio_set_value;
- plgpio->chip.to_irq = plgpio_to_irq;
plgpio->chip.label = dev_name(&pdev->dev);
plgpio->chip.dev = &pdev->dev;
plgpio->chip.owner = THIS_MODULE;
+ plgpio->chip.of_node = pdev->dev.of_node;
if (!IS_ERR(plgpio->clk)) {
ret = clk_prepare(plgpio->clk);
@@ -592,43 +581,32 @@ static int plgpio_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_info(&pdev->dev, "irqs not supported\n");
- return 0;
- }
-
- plgpio->irq_base = irq_alloc_descs(-1, 0, plgpio->chip.ngpio, 0);
- if (IS_ERR_VALUE(plgpio->irq_base)) {
- /* we would not support irq for gpio */
- dev_warn(&pdev->dev, "couldn't allocate irq base\n");
+ dev_info(&pdev->dev, "PLGPIO registered without IRQs\n");
return 0;
}
- plgpio->irq_domain = irq_domain_add_legacy(np, plgpio->chip.ngpio,
- plgpio->irq_base, 0, &irq_domain_simple_ops, NULL);
- if (WARN_ON(!plgpio->irq_domain)) {
- dev_err(&pdev->dev, "irq domain init failed\n");
- irq_free_descs(plgpio->irq_base, plgpio->chip.ngpio);
- ret = -ENXIO;
+ ret = gpiochip_irqchip_add(&plgpio->chip,
+ &plgpio_irqchip,
+ 0,
+ handle_simple_irq,
+ IRQ_TYPE_NONE);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add irqchip to gpiochip\n");
goto remove_gpiochip;
}
- irq_set_chained_handler(irq, plgpio_irq_handler);
- for (i = 0; i < plgpio->chip.ngpio; i++) {
- irq_set_chip_and_handler(i + plgpio->irq_base, &plgpio_irqchip,
- handle_simple_irq);
- set_irq_flags(i + plgpio->irq_base, IRQF_VALID);
- irq_set_chip_data(i + plgpio->irq_base, plgpio);
- }
+ gpiochip_set_chained_irqchip(&plgpio->chip,
+ &plgpio_irqchip,
+ irq,
+ plgpio_irq_handler);
- irq_set_handler_data(irq, plgpio);
dev_info(&pdev->dev, "PLGPIO registered with IRQs\n");
return 0;
remove_gpiochip:
dev_info(&pdev->dev, "Remove gpiochip\n");
- if (gpiochip_remove(&plgpio->chip))
- dev_err(&pdev->dev, "unable to remove gpiochip\n");
+ gpiochip_remove(&plgpio->chip);
unprepare_clk:
if (!IS_ERR(plgpio->clk))
clk_unprepare(plgpio->clk);
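
The conversion above follows the generic GPIOLIB_IRQCHIP recipe: register the gpio_chip, attach an irq_chip to it (which creates the irqdomain and sets the chip as irq_chip_data), then chain the parent interrupt to a demux handler. A minimal sketch of that recipe, using invented example_* names and only the kernel helpers the patch itself calls:

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static struct irq_chip example_irqchip = {
	.name = "example-gpio",
	/* .irq_mask, .irq_unmask and .irq_set_type would be filled in here */
};

/* Chained handler: demux pending lines via irq_find_mapping(gc->irqdomain, pin). */
static void example_irq_handler(unsigned int irq, struct irq_desc *desc)
{
}

static int example_setup_gpio_irqs(struct gpio_chip *chip, int parent_irq)
{
	int ret;

	ret = gpiochip_add(chip);
	if (ret)
		return ret;

	/* Creates the irqdomain and ties irq_chip_data to the gpio_chip. */
	ret = gpiochip_irqchip_add(chip, &example_irqchip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret) {
		gpiochip_remove(chip);
		return ret;
	}

	/* Route the chained parent interrupt to the demux handler. */
	gpiochip_set_chained_irqchip(chip, &example_irqchip, parent_irq,
				     example_irq_handler);
	return 0;
}

Compared with the removed code, this drops the manual irq_alloc_descs()/irq_domain_add_legacy() bookkeeping and the driver-private irq_base.
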
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 58bf6867aa17..f72cc4e192bd 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -274,12 +274,6 @@ static int spear_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
return spear_pinctrl_endisable(pctldev, function, group, true);
}
-static void spear_pinctrl_disable(struct pinctrl_dev *pctldev,
- unsigned function, unsigned group)
-{
- spear_pinctrl_endisable(pctldev, function, group, false);
-}
-
/* gpio with pinmux */
static struct spear_gpio_pingroup *get_gpio_pingroup(struct spear_pmx *pmx,
unsigned pin)
@@ -345,7 +339,6 @@ static const struct pinmux_ops spear_pinmux_ops = {
.get_function_name = spear_pinctrl_get_func_name,
.get_function_groups = spear_pinctrl_get_func_groups,
.enable = spear_pinctrl_enable,
- .disable = spear_pinctrl_disable,
.gpio_request_enable = gpio_request_enable,
.gpio_disable_free = gpio_disable_free,
};
diff --git a/drivers/pinctrl/sunxi/Kconfig b/drivers/pinctrl/sunxi/Kconfig
index 73e0a305ea13..a5e10f777ed2 100644
--- a/drivers/pinctrl/sunxi/Kconfig
+++ b/drivers/pinctrl/sunxi/Kconfig
@@ -1,36 +1,42 @@
if ARCH_SUNXI
-config PINCTRL_SUNXI
- bool
-
config PINCTRL_SUNXI_COMMON
bool
select PINMUX
select GENERIC_PINCONF
config PINCTRL_SUN4I_A10
- def_bool PINCTRL_SUNXI || MACH_SUN4I
+ def_bool MACH_SUN4I
select PINCTRL_SUNXI_COMMON
config PINCTRL_SUN5I_A10S
- def_bool PINCTRL_SUNXI || MACH_SUN5I
+ def_bool MACH_SUN5I
select PINCTRL_SUNXI_COMMON
config PINCTRL_SUN5I_A13
- def_bool PINCTRL_SUNXI || MACH_SUN5I
+ def_bool MACH_SUN5I
select PINCTRL_SUNXI_COMMON
config PINCTRL_SUN6I_A31
- def_bool PINCTRL_SUNXI || MACH_SUN6I
+ def_bool MACH_SUN6I
select PINCTRL_SUNXI_COMMON
config PINCTRL_SUN6I_A31_R
- def_bool PINCTRL_SUNXI || MACH_SUN6I
+ def_bool MACH_SUN6I
depends on RESET_CONTROLLER
select PINCTRL_SUNXI_COMMON
config PINCTRL_SUN7I_A20
- def_bool PINCTRL_SUNXI || MACH_SUN7I
+ def_bool MACH_SUN7I
+ select PINCTRL_SUNXI_COMMON
+
+config PINCTRL_SUN8I_A23
+ def_bool MACH_SUN8I
+ select PINCTRL_SUNXI_COMMON
+
+config PINCTRL_SUN8I_A23_R
+ def_bool MACH_SUN8I
+ depends on RESET_CONTROLLER
select PINCTRL_SUNXI_COMMON
endif
diff --git a/drivers/pinctrl/sunxi/Makefile b/drivers/pinctrl/sunxi/Makefile
index 0f4461cbe11d..e797efb02901 100644
--- a/drivers/pinctrl/sunxi/Makefile
+++ b/drivers/pinctrl/sunxi/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_PINCTRL_SUN5I_A13) += pinctrl-sun5i-a13.o
obj-$(CONFIG_PINCTRL_SUN6I_A31) += pinctrl-sun6i-a31.o
obj-$(CONFIG_PINCTRL_SUN6I_A31_R) += pinctrl-sun6i-a31-r.o
obj-$(CONFIG_PINCTRL_SUN7I_A20) += pinctrl-sun7i-a20.o
+obj-$(CONFIG_PINCTRL_SUN8I_A23) += pinctrl-sun8i-a23.o
+obj-$(CONFIG_PINCTRL_SUN8I_A23_R) += pinctrl-sun8i-a23-r.o
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index fa1ff7c7e357..86b608bedca6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -1010,6 +1010,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
.pins = sun4i_a10_pins,
.npins = ARRAY_SIZE(sun4i_a10_pins),
+ .irq_banks = 1,
};
static int sun4i_a10_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
index 164d743f526c..2fa7430cabaf 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a10s.c
@@ -661,6 +661,7 @@ static const struct sunxi_desc_pin sun5i_a10s_pins[] = {
static const struct sunxi_pinctrl_desc sun5i_a10s_pinctrl_data = {
.pins = sun5i_a10s_pins,
.npins = ARRAY_SIZE(sun5i_a10s_pins),
+ .irq_banks = 1,
};
static int sun5i_a10s_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
index 1188a2b7b988..e47c33dbae3a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun5i-a13.c
@@ -330,15 +330,12 @@ static const struct sunxi_desc_pin sun5i_a13_pins[] = {
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION_IRQ(0x6, 0)), /* EINT0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION_IRQ(0x6, 1)), /* EINT1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
- SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION_IRQ(0x6, 2)), /* EINT2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -382,6 +379,7 @@ static const struct sunxi_desc_pin sun5i_a13_pins[] = {
static const struct sunxi_pinctrl_desc sun5i_a13_pinctrl_data = {
.pins = sun5i_a13_pins,
.npins = ARRAY_SIZE(sun5i_a13_pins),
+ .irq_banks = 1,
};
static int sun5i_a13_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
index 8fcba48e0a42..9a2517b65113 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c
@@ -93,6 +93,7 @@ static const struct sunxi_pinctrl_desc sun6i_a31_r_pinctrl_data = {
.pins = sun6i_a31_r_pins,
.npins = ARRAY_SIZE(sun6i_a31_r_pins),
.pin_base = PL_BASE,
+ .irq_banks = 2,
};
static int sun6i_a31_r_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
index 8dea5856458b..a2b4b85c5ad5 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun6i-a31.c
@@ -24,208 +24,244 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD0 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D0 */
- SUNXI_FUNCTION(0x4, "uart1")), /* DTR */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 0)), /* PA_EINT0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD1 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D1 */
- SUNXI_FUNCTION(0x4, "uart1")), /* DSR */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 1)), /* PA_EINT1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD2 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D2 */
- SUNXI_FUNCTION(0x4, "uart1")), /* DCD */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 2)), /* PA_EINT2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD3 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D3 */
- SUNXI_FUNCTION(0x4, "uart1")), /* RING */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RING */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 3)), /* PA_EINT3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD4 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D4 */
- SUNXI_FUNCTION(0x4, "uart1")), /* TX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 4)), /* PA_EINT4 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD5 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D5 */
- SUNXI_FUNCTION(0x4, "uart1")), /* RX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 5)), /* PA_EINT5 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD6 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D6 */
- SUNXI_FUNCTION(0x4, "uart1")), /* RTS */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 6)), /* PA_EINT6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXD7 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D7 */
- SUNXI_FUNCTION(0x4, "uart1")), /* CTS */
+ SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 7)), /* PA_EINT7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 8),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXCLK */
- SUNXI_FUNCTION(0x3, "lcd1")), /* D8 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D8 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 8)), /* PA_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXEN */
SUNXI_FUNCTION(0x3, "lcd1"), /* D9 */
SUNXI_FUNCTION(0x4, "mmc3"), /* CMD */
- SUNXI_FUNCTION(0x5, "mmc2")), /* CMD */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 9)), /* PA_EINT9 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* GTXCLK */
SUNXI_FUNCTION(0x3, "lcd1"), /* D10 */
SUNXI_FUNCTION(0x4, "mmc3"), /* CLK */
- SUNXI_FUNCTION(0x5, "mmc2")), /* CLK */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 10)), /* PA_EINT10 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD0 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D11 */
SUNXI_FUNCTION(0x4, "mmc3"), /* D0 */
- SUNXI_FUNCTION(0x5, "mmc2")), /* D0 */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 11)), /* PA_EINT11 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD1 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D12 */
SUNXI_FUNCTION(0x4, "mmc3"), /* D1 */
- SUNXI_FUNCTION(0x5, "mmc2")), /* D1 */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 12)), /* PA_EINT12 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD2 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D13 */
SUNXI_FUNCTION(0x4, "mmc3"), /* D2 */
- SUNXI_FUNCTION(0x5, "mmc2")), /* D2 */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 13)), /* PA_EINT13 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD3 */
SUNXI_FUNCTION(0x3, "lcd1"), /* D14 */
SUNXI_FUNCTION(0x4, "mmc3"), /* D3 */
- SUNXI_FUNCTION(0x5, "mmc2")), /* D3 */
+ SUNXI_FUNCTION(0x5, "mmc2"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)), /* PA_EINT14 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD4 */
- SUNXI_FUNCTION(0x3, "lcd1")), /* D15 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D15 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)), /* PA_EINT15 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD5 */
- SUNXI_FUNCTION(0x3, "lcd1")), /* D16 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D16 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)), /* PA_EINT16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD6 */
- SUNXI_FUNCTION(0x3, "lcd1")), /* D17 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D17 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 17)), /* PA_EINT17 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXD7 */
- SUNXI_FUNCTION(0x3, "lcd1")), /* D18 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D18 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 18)), /* PA_EINT18 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXDV */
SUNXI_FUNCTION(0x3, "lcd1"), /* D19 */
- SUNXI_FUNCTION(0x4, "pwm3")), /* Positive */
+ SUNXI_FUNCTION(0x4, "pwm3"), /* Positive */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 19)), /* PA_EINT19 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 20),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXCLK */
SUNXI_FUNCTION(0x3, "lcd1"), /* D20 */
- SUNXI_FUNCTION(0x4, "pwm3")), /* Negative */
+ SUNXI_FUNCTION(0x4, "pwm3"), /* Negative */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 20)), /* PA_EINT20 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 21),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* TXERR */
SUNXI_FUNCTION(0x3, "lcd1"), /* D21 */
- SUNXI_FUNCTION(0x4, "spi3")), /* CS0 */
+ SUNXI_FUNCTION(0x4, "spi3"), /* CS0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 21)), /* PA_EINT21 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 22),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* RXERR */
SUNXI_FUNCTION(0x3, "lcd1"), /* D22 */
- SUNXI_FUNCTION(0x4, "spi3")), /* CLK */
+ SUNXI_FUNCTION(0x4, "spi3"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 22)), /* PA_EINT22 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 23),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* COL */
SUNXI_FUNCTION(0x3, "lcd1"), /* D23 */
- SUNXI_FUNCTION(0x4, "spi3")), /* MOSI */
+ SUNXI_FUNCTION(0x4, "spi3"), /* MOSI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 23)), /* PA_EINT23 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 24),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* CRS */
SUNXI_FUNCTION(0x3, "lcd1"), /* CLK */
- SUNXI_FUNCTION(0x4, "spi3")), /* MISO */
+ SUNXI_FUNCTION(0x4, "spi3"), /* MISO */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 24)), /* PA_EINT24 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 25),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* CLKIN */
SUNXI_FUNCTION(0x3, "lcd1"), /* DE */
- SUNXI_FUNCTION(0x4, "spi3")), /* CS1 */
+ SUNXI_FUNCTION(0x4, "spi3"), /* CS1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 25)), /* PA_EINT25 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 26),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* MDC */
- SUNXI_FUNCTION(0x3, "lcd1")), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* HSYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 26)), /* PA_EINT26 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 27),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "gmac"), /* MDIO */
- SUNXI_FUNCTION(0x3, "lcd1")), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* VSYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 27)), /* PA_EINT27 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
- SUNXI_FUNCTION(0x4, "csi")), /* MCLK1 */
+ SUNXI_FUNCTION(0x4, "csi"), /* MCLK1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 0)), /* PB_EINT0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0")), /* BCLK */
+ SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 1)), /* PB_EINT1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0")), /* LRCK */
+ SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 2)), /* PB_EINT2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "i2s0")), /* DO0 */
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 3)), /* PB_EINT3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s0"), /* DO1 */
- SUNXI_FUNCTION(0x3, "uart3")), /* RTS */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 4)), /* PB_EINT4 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s0"), /* DO2 */
SUNXI_FUNCTION(0x3, "uart3"), /* TX */
- SUNXI_FUNCTION(0x4, "i2c3")), /* SCK */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 5)), /* PB_EINT5 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2s0"), /* DO3 */
SUNXI_FUNCTION(0x3, "uart3"), /* RX */
- SUNXI_FUNCTION(0x4, "i2c3")), /* SDA */
+ SUNXI_FUNCTION(0x4, "i2c3"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 6)), /* PB_EINT6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "i2s0")), /* DI */
+ SUNXI_FUNCTION(0x3, "i2s0"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 7)), /* PB_EINT7 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -510,86 +546,103 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
- SUNXI_FUNCTION(0x3, "ts")), /* CLK */
+ SUNXI_FUNCTION(0x3, "ts"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 0)), /* PE_EINT0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
- SUNXI_FUNCTION(0x3, "ts")), /* ERR */
+ SUNXI_FUNCTION(0x3, "ts"), /* ERR */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PE_EINT1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
- SUNXI_FUNCTION(0x3, "ts")), /* SYNC */
+ SUNXI_FUNCTION(0x3, "ts"), /* SYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 2)), /* PE_EINT2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
- SUNXI_FUNCTION(0x3, "ts")), /* DVLD */
+ SUNXI_FUNCTION(0x3, "ts"), /* DVLD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 3)), /* PE_EINT3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D0 */
- SUNXI_FUNCTION(0x3, "uart5")), /* TX */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 4)), /* PE_EINT4 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D1 */
- SUNXI_FUNCTION(0x3, "uart5")), /* RX */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 5)), /* PE_EINT5 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D2 */
- SUNXI_FUNCTION(0x3, "uart5")), /* RTS */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 6)), /* PE_EINT6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D3 */
- SUNXI_FUNCTION(0x3, "uart5")), /* CTS */
+ SUNXI_FUNCTION(0x3, "uart5"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 7)), /* PE_EINT7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D4 */
- SUNXI_FUNCTION(0x3, "ts")), /* D0 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 8)), /* PE_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D5 */
- SUNXI_FUNCTION(0x3, "ts")), /* D1 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 9)), /* PE_EINT9 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D6 */
- SUNXI_FUNCTION(0x3, "ts")), /* D2 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 10)), /* PE_EINT10 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D7 */
- SUNXI_FUNCTION(0x3, "ts")), /* D3 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PE_EINT11 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D8 */
- SUNXI_FUNCTION(0x3, "ts")), /* D4 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D4 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 12)), /* PE_EINT12 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D9 */
- SUNXI_FUNCTION(0x3, "ts")), /* D5 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D5 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 13)), /* PE_EINT13 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D10 */
- SUNXI_FUNCTION(0x3, "ts")), /* D6 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D6 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 14)), /* PE_EINT14 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "csi"), /* D11 */
- SUNXI_FUNCTION(0x3, "ts")), /* D7 */
+ SUNXI_FUNCTION(0x3, "ts"), /* D7 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 15)), /* PE_EINT15 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "csi")), /* MIPI CSI MCLK */
+ SUNXI_FUNCTION(0x2, "csi"), /* MIPI CSI MCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 16)), /* PE_EINT16 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -625,86 +678,105 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1")), /* CLK */
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 0)), /* PG_EINT0 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1")), /* CMD */
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 1)), /* PG_EINT1 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1")), /* D0 */
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 2)), /* PG_EINT2 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1")), /* D1 */
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 3)), /* PG_EINT3 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1")), /* D2 */
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 4)), /* PG_EINT4 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "mmc1")), /* D3 */
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 5)), /* PG_EINT5 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2")), /* TX */
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 6)), /* PG_EINT6 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2")), /* RX */
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 7)), /* PG_EINT7 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2")), /* RTS */
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 8)), /* PG_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart2")), /* CTS */
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 9)), /* PG_EINT9 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
- SUNXI_FUNCTION(0x3, "usb")), /* DP3 */
+ SUNXI_FUNCTION(0x3, "usb"), /* DP3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 10)), /* PG_EINT10 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
- SUNXI_FUNCTION(0x3, "usb")), /* DM3 */
+ SUNXI_FUNCTION(0x3, "usb"), /* DM3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 11)), /* PG_EINT11 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
- SUNXI_FUNCTION(0x3, "i2s1")), /* MCLK */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* MCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 12)), /* PG_EINT12 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
- SUNXI_FUNCTION(0x3, "i2s1")), /* BCLK */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* BCLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 13)), /* PG_EINT13 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 14),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
- SUNXI_FUNCTION(0x3, "i2s1")), /* LRCK */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* LRCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 14)), /* PG_EINT14 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 15),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
- SUNXI_FUNCTION(0x3, "i2s1")), /* DIN */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* DIN */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 15)), /* PG_EINT15 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 16),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x3, "i2s1")), /* DOUT */
+ SUNXI_FUNCTION(0x3, "i2s1"), /* DOUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 16)), /* PG_EINT16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 17),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart4")), /* TX */
+ SUNXI_FUNCTION(0x2, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 17)), /* PG_EINT17 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x2, "uart4")), /* RX */
+ SUNXI_FUNCTION(0x2, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 3, 18)), /* PG_EINT18 */
/* Hole */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -836,6 +908,7 @@ static const struct sunxi_desc_pin sun6i_a31_pins[] = {
static const struct sunxi_pinctrl_desc sun6i_a31_pinctrl_data = {
.pins = sun6i_a31_pins,
.npins = ARRAY_SIZE(sun6i_a31_pins),
+ .irq_banks = 4,
};
static int sun6i_a31_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
index d8577ce5f1a4..dac99e02bfdb 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun7i-a20.c
@@ -1036,6 +1036,7 @@ static const struct sunxi_desc_pin sun7i_a20_pins[] = {
static const struct sunxi_pinctrl_desc sun7i_a20_pinctrl_data = {
.pins = sun7i_a20_pins,
.npins = ARRAY_SIZE(sun7i_a20_pins),
+ .irq_banks = 1,
};
static int sun7i_a20_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
new file mode 100644
index 000000000000..90f3b3a7c51e
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c
@@ -0,0 +1,142 @@
+/*
+ * Allwinner A23 SoCs special pins pinctrl driver.
+ *
+ * Copyright (C) 2014 Chen-Yu Tsai
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * Copyright (C) 2014 Boris Brezillon
+ * Boris Brezillon <boris.brezillon@free-electrons.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/reset.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun8i_a23_r_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_rsb"), /* SCK */
+ SUNXI_FUNCTION(0x3, "s_twi"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 0)), /* PL_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_rsb"), /* SDA */
+ SUNXI_FUNCTION(0x3, "s_twi"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 1)), /* PL_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 2)), /* PL_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_uart"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 3)), /* PL_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "s_jtag"), /* MS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 4)), /* PL_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "s_jtag"), /* CK */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 5)), /* PL_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "s_jtag"), /* DO */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 6)), /* PL_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "s_jtag"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 7)), /* PL_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_twi"), /* SCK */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 8)), /* PL_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_twi"), /* SDA */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 9)), /* PL_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "s_pwm"),
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 10)), /* PL_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(L, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 11)), /* PL_EINT11 */
+};
+
+static const struct sunxi_pinctrl_desc sun8i_a23_r_pinctrl_data = {
+ .pins = sun8i_a23_r_pins,
+ .npins = ARRAY_SIZE(sun8i_a23_r_pins),
+ .pin_base = PL_BASE,
+ .irq_banks = 1,
+};
+
+static int sun8i_a23_r_pinctrl_probe(struct platform_device *pdev)
+{
+ struct reset_control *rstc;
+ int ret;
+
+ rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rstc)) {
+ dev_err(&pdev->dev, "Reset controller missing\n");
+ return PTR_ERR(rstc);
+ }
+
+ ret = reset_control_deassert(rstc);
+ if (ret)
+ return ret;
+
+ ret = sunxi_pinctrl_init(pdev,
+ &sun8i_a23_r_pinctrl_data);
+
+ if (ret)
+ reset_control_assert(rstc);
+
+ return ret;
+}
+
+static struct of_device_id sun8i_a23_r_pinctrl_match[] = {
+ { .compatible = "allwinner,sun8i-a23-r-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_a23_r_pinctrl_match);
+
+static struct platform_driver sun8i_a23_r_pinctrl_driver = {
+ .probe = sun8i_a23_r_pinctrl_probe,
+ .driver = {
+ .name = "sun8i-a23-r-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = sun8i_a23_r_pinctrl_match,
+ },
+};
+module_platform_driver(sun8i_a23_r_pinctrl_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
+MODULE_DESCRIPTION("Allwinner A23 R_PIO pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
new file mode 100644
index 000000000000..ac71e8c5901b
--- /dev/null
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a23.c
@@ -0,0 +1,593 @@
+/*
+ * Allwinner A23 SoCs pinctrl driver.
+ *
+ * Copyright (C) 2014 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-sunxi.h"
+
+static const struct sunxi_desc_pin sun8i_a23_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 0)), /* PA_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CKO */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 1)), /* PA_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DOO */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 2)), /* PA_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DIO */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 3)), /* PA_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 4)), /* PA_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 5)), /* PA_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 6)), /* PA_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(A, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 0, 7)), /* PA_EINT7 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 0)), /* PB_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 1)), /* PB_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 2)), /* PB_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 3)), /* PB_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* SYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 4)), /* PB_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DOUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 5)), /* PB_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DIN */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 6)), /* PB_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s0"), /* DI */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 1, 7)), /* PB_EINT7 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* WE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CE1 */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RE */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* RB1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand"), /* DQ6 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand"), /* DQ7 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand"), /* DQS */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* RST */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE3 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "mmc1")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "mmc1")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "mmc1")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "mmc1")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "mmc1")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "mmc1")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "uart3")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "uart3")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "uart1")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "uart1")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "uart1")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "uart1")), /* CTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "i2s1")), /* SYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "i2s1")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
+ SUNXI_FUNCTION(0x3, "i2s1")), /* DOUT */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
+ SUNXI_FUNCTION(0x3, "i2s1")), /* DIN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 18),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 19),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 20),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 21),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 22),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 23),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 24),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 25),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 26),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(D, 27),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN3 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* PCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* MCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* VSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SCK */
+ SUNXI_FUNCTION(0x3, "i2c2")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* SDA */
+ SUNXI_FUNCTION(0x3, "i2c2")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 14),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 15),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 16),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 17),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* MS1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DI1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DO1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(F, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* CK1 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 0)), /* PG_EINT0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* CMD */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 1)), /* PG_EINT1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D0 */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 2)), /* PG_EINT2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D1 */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 3)), /* PG_EINT3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D2 */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 4)), /* PG_EINT4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1"), /* D3 */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 5)), /* PG_EINT5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* TX */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 6)), /* PG_EINT6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart1"), /* RX */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 7)), /* PG_EINT7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 8)), /* PG_EINT8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 9)), /* PG_EINT9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 10),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* SYNC */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 10)), /* PG_EINT10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 11),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* CLK */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 11)), /* PG_EINT11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 12),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* DOUT */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 12)), /* PG_EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(G, 13),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s1"), /* DIN */
+ SUNXI_FUNCTION_IRQ_BANK(0x4, 2, 13)), /* PG_EINT13 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 0),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm0")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 1),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm1")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 2),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 3),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 4),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 5),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 6),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CS */
+ SUNXI_FUNCTION(0x3, "uart3")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 7),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart3")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 8),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* DOUT */
+ SUNXI_FUNCTION(0x3, "uart3")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* DIN */
+ SUNXI_FUNCTION(0x3, "uart3")), /* CTS */
+};
+
+static const struct sunxi_pinctrl_desc sun8i_a23_pinctrl_data = {
+ .pins = sun8i_a23_pins,
+ .npins = ARRAY_SIZE(sun8i_a23_pins),
+ .irq_banks = 3,
+};
+
+static int sun8i_a23_pinctrl_probe(struct platform_device *pdev)
+{
+ return sunxi_pinctrl_init(pdev,
+ &sun8i_a23_pinctrl_data);
+}
+
+static struct of_device_id sun8i_a23_pinctrl_match[] = {
+ { .compatible = "allwinner,sun8i-a23-pinctrl", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun8i_a23_pinctrl_match);
+
+static struct platform_driver sun8i_a23_pinctrl_driver = {
+ .probe = sun8i_a23_pinctrl_probe,
+ .driver = {
+ .name = "sun8i-a23-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = sun8i_a23_pinctrl_match,
+ },
+};
+module_platform_driver(sun8i_a23_pinctrl_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
+MODULE_DESCRIPTION("Allwinner A23 pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 5f38c7f67834..3df66e366c87 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -31,6 +31,9 @@
#include "../core.h"
#include "pinctrl-sunxi.h"
+static struct irq_chip sunxi_pinctrl_edge_irq_chip;
+static struct irq_chip sunxi_pinctrl_level_irq_chip;
+
static struct sunxi_pinctrl_group *
sunxi_pinctrl_find_group_by_name(struct sunxi_pinctrl *pctl, const char *group)
{
@@ -508,7 +511,7 @@ static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
base = PINS_PER_BANK * gpiospec->args[0];
pin = base + gpiospec->args[1];
- if (pin > (gc->base + gc->ngpio))
+ if (pin > gc->ngpio)
return -EINVAL;
if (flags)
@@ -521,25 +524,61 @@ static int sunxi_pinctrl_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
struct sunxi_desc_function *desc;
+ unsigned pinnum = pctl->desc->pin_base + offset;
+ unsigned irqnum;
if (offset >= chip->ngpio)
return -ENXIO;
- desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, offset, "irq");
+ desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, pinnum, "irq");
if (!desc)
return -EINVAL;
+ irqnum = desc->irqbank * IRQ_PER_BANK + desc->irqnum;
+
dev_dbg(chip->dev, "%s: request IRQ for GPIO %d, return %d\n",
- chip->label, offset + chip->base, desc->irqnum);
+ chip->label, offset + chip->base, irqnum);
- return irq_find_mapping(pctl->domain, desc->irqnum);
+ return irq_find_mapping(pctl->domain, irqnum);
}
+static int sunxi_pinctrl_irq_request_resources(struct irq_data *d)
+{
+ struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ struct sunxi_desc_function *func;
+ int ret;
+
+ func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
+ pctl->irq_array[d->hwirq], "irq");
+ if (!func)
+ return -EINVAL;
+
+ ret = gpio_lock_as_irq(pctl->chip,
+ pctl->irq_array[d->hwirq] - pctl->desc->pin_base);
+ if (ret) {
+ dev_err(pctl->dev, "unable to lock HW IRQ %lu for IRQ\n",
+ irqd_to_hwirq(d));
+ return ret;
+ }
+
+ /* Change muxing to INT mode */
+ sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
-static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
- unsigned int type)
+ return 0;
+}
+
+static void sunxi_pinctrl_irq_release_resources(struct irq_data *d)
+{
+ struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+
+ gpio_unlock_as_irq(pctl->chip,
+ pctl->irq_array[d->hwirq] - pctl->desc->pin_base);
+}
+
+static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
+ struct irq_desc *desc = container_of(d, struct irq_desc, irq_data);
u32 reg = sunxi_irq_cfg_reg(d->hwirq);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
unsigned long flags;
@@ -566,6 +605,14 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
return -EINVAL;
}
+ if (type & IRQ_TYPE_LEVEL_MASK) {
+ d->chip = &sunxi_pinctrl_level_irq_chip;
+ desc->handle_irq = handle_fasteoi_irq;
+ } else {
+ d->chip = &sunxi_pinctrl_edge_irq_chip;
+ desc->handle_irq = handle_edge_irq;
+ }
+
spin_lock_irqsave(&pctl->lock, flags);
regval = readl(pctl->membase + reg);
@@ -577,26 +624,14 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
return 0;
}
-static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d)
+static void sunxi_pinctrl_irq_ack(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- u32 ctrl_reg = sunxi_irq_ctrl_reg(d->hwirq);
- u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq);
u32 status_reg = sunxi_irq_status_reg(d->hwirq);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
- unsigned long flags;
- u32 val;
-
- spin_lock_irqsave(&pctl->lock, flags);
-
- /* Mask the IRQ */
- val = readl(pctl->membase + ctrl_reg);
- writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg);
/* Clear the IRQ */
writel(1 << status_idx, pctl->membase + status_reg);
-
- spin_unlock_irqrestore(&pctl->lock, flags);
}
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
@@ -619,19 +654,11 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
{
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
- struct sunxi_desc_function *func;
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
unsigned long flags;
u32 val;
- func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
- pctl->irq_array[d->hwirq],
- "irq");
-
- /* Change muxing to INT mode */
- sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
-
spin_lock_irqsave(&pctl->lock, flags);
/* Unmask the IRQ */
@@ -641,28 +668,60 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
spin_unlock_irqrestore(&pctl->lock, flags);
}
-static struct irq_chip sunxi_pinctrl_irq_chip = {
+static void sunxi_pinctrl_irq_ack_unmask(struct irq_data *d)
+{
+ sunxi_pinctrl_irq_ack(d);
+ sunxi_pinctrl_irq_unmask(d);
+}
+
+static struct irq_chip sunxi_pinctrl_edge_irq_chip = {
+ .irq_ack = sunxi_pinctrl_irq_ack,
+ .irq_mask = sunxi_pinctrl_irq_mask,
+ .irq_unmask = sunxi_pinctrl_irq_unmask,
+ .irq_request_resources = sunxi_pinctrl_irq_request_resources,
+ .irq_release_resources = sunxi_pinctrl_irq_release_resources,
+ .irq_set_type = sunxi_pinctrl_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static struct irq_chip sunxi_pinctrl_level_irq_chip = {
+ .irq_eoi = sunxi_pinctrl_irq_ack,
.irq_mask = sunxi_pinctrl_irq_mask,
- .irq_mask_ack = sunxi_pinctrl_irq_mask_ack,
.irq_unmask = sunxi_pinctrl_irq_unmask,
+ /* Define irq_enable / disable to avoid spurious irqs for drivers
+ * using these to suppress irqs while they clear the irq source */
+ .irq_enable = sunxi_pinctrl_irq_ack_unmask,
+ .irq_disable = sunxi_pinctrl_irq_mask,
+ .irq_request_resources = sunxi_pinctrl_irq_request_resources,
+ .irq_release_resources = sunxi_pinctrl_irq_release_resources,
.irq_set_type = sunxi_pinctrl_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_EOI_THREADED |
+ IRQCHIP_EOI_IF_HANDLED,
};
static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_get_chip(irq);
struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
- const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
+ unsigned long bank, reg, val;
+
+ for (bank = 0; bank < pctl->desc->irq_banks; bank++)
+ if (irq == pctl->irq[bank])
+ break;
+
+ if (bank == pctl->desc->irq_banks)
+ return;
- /* Clear all interrupts */
- writel(reg, pctl->membase + IRQ_STATUS_REG);
+ reg = sunxi_irq_status_reg_from_bank(bank);
+ val = readl(pctl->membase + reg);
- if (reg) {
+ if (val) {
int irqoffset;
chained_irq_enter(chip, desc);
- for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
- int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
+ for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) {
+ int pin_irq = irq_find_mapping(pctl->domain,
+ bank * IRQ_PER_BANK + irqoffset);
generic_handle_irq(pin_irq);
}
chained_irq_exit(chip, desc);
@@ -730,8 +789,11 @@ static int sunxi_pinctrl_build_state(struct platform_device *pdev)
while (func->name) {
/* Create interrupt mapping while we're at it */
- if (!strcmp(func->name, "irq"))
- pctl->irq_array[func->irqnum] = pin->pin.number;
+ if (!strcmp(func->name, "irq")) {
+ int irqnum = func->irqnum + func->irqbank * IRQ_PER_BANK;
+ pctl->irq_array[irqnum] = pin->pin.number;
+ }
+
sunxi_pinctrl_add_function(pctl, func->name);
func++;
}
@@ -801,6 +863,13 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
pctl->dev = &pdev->dev;
pctl->desc = desc;
+ pctl->irq_array = devm_kcalloc(&pdev->dev,
+ IRQ_PER_BANK * pctl->desc->irq_banks,
+ sizeof(*pctl->irq_array),
+ GFP_KERNEL);
+ if (!pctl->irq_array)
+ return -ENOMEM;
+
ret = sunxi_pinctrl_build_state(pdev);
if (ret) {
dev_err(&pdev->dev, "dt probe failed: %d\n", ret);
@@ -869,7 +938,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
const struct sunxi_desc_pin *pin = pctl->desc->pins + i;
ret = gpiochip_add_pin_range(pctl->chip, dev_name(&pdev->dev),
- pin->pin.number,
+ pin->pin.number - pctl->desc->pin_base,
pin->pin.number, 1);
if (ret)
goto gpiochip_error;
@@ -885,30 +954,51 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
if (ret)
goto gpiochip_error;
- pctl->irq = irq_of_parse_and_map(node, 0);
+ pctl->irq = devm_kcalloc(&pdev->dev,
+ pctl->desc->irq_banks,
+ sizeof(*pctl->irq),
+ GFP_KERNEL);
if (!pctl->irq) {
- ret = -EINVAL;
+ ret = -ENOMEM;
goto clk_error;
}
- pctl->domain = irq_domain_add_linear(node, SUNXI_IRQ_NUMBER,
- &irq_domain_simple_ops, NULL);
+ for (i = 0; i < pctl->desc->irq_banks; i++) {
+ pctl->irq[i] = platform_get_irq(pdev, i);
+ if (pctl->irq[i] < 0) {
+ ret = pctl->irq[i];
+ goto clk_error;
+ }
+ }
+
+ pctl->domain = irq_domain_add_linear(node,
+ pctl->desc->irq_banks * IRQ_PER_BANK,
+ &irq_domain_simple_ops,
+ NULL);
if (!pctl->domain) {
dev_err(&pdev->dev, "Couldn't register IRQ domain\n");
ret = -ENOMEM;
goto clk_error;
}
- for (i = 0; i < SUNXI_IRQ_NUMBER; i++) {
+ for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) {
int irqno = irq_create_mapping(pctl->domain, i);
- irq_set_chip_and_handler(irqno, &sunxi_pinctrl_irq_chip,
- handle_simple_irq);
+ irq_set_chip_and_handler(irqno, &sunxi_pinctrl_edge_irq_chip,
+ handle_edge_irq);
irq_set_chip_data(irqno, pctl);
};
- irq_set_chained_handler(pctl->irq, sunxi_pinctrl_irq_handler);
- irq_set_handler_data(pctl->irq, pctl);
+ for (i = 0; i < pctl->desc->irq_banks; i++) {
+ /* Mask and clear all IRQs before registering a handler */
+ writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i));
+ writel(0xffffffff,
+ pctl->membase + sunxi_irq_status_reg_from_bank(i));
+
+ irq_set_chained_handler(pctl->irq[i],
+ sunxi_pinctrl_irq_handler);
+ irq_set_handler_data(pctl->irq[i], pctl);
+ }
dev_info(&pdev->dev, "initialized sunXi PIO driver\n");
@@ -917,8 +1007,7 @@ int sunxi_pinctrl_init(struct platform_device *pdev,
clk_error:
clk_disable_unprepare(clk);
gpiochip_error:
- if (gpiochip_remove(pctl->chip))
- dev_err(&pdev->dev, "failed to remove gpio chip\n");
+ gpiochip_remove(pctl->chip);
pinctrl_error:
pinctrl_unregister(pctl->pctl_dev);
return ret;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index 8169ba598876..4245b96c7996 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -53,7 +53,7 @@
#define PULL_PINS_BITS 2
#define PULL_PINS_MASK 0x03
-#define SUNXI_IRQ_NUMBER 32
+#define IRQ_PER_BANK 32
#define IRQ_CFG_REG 0x200
#define IRQ_CFG_IRQ_PER_REG 8
@@ -68,6 +68,8 @@
#define IRQ_STATUS_IRQ_BITS 1
#define IRQ_STATUS_IRQ_MASK ((1 << IRQ_STATUS_IRQ_BITS) - 1)
+#define IRQ_MEM_SIZE 0x20
+
#define IRQ_EDGE_RISING 0x00
#define IRQ_EDGE_FALLING 0x01
#define IRQ_LEVEL_HIGH 0x02
@@ -77,6 +79,7 @@
struct sunxi_desc_function {
const char *name;
u8 muxval;
+ u8 irqbank;
u8 irqnum;
};
@@ -89,6 +92,7 @@ struct sunxi_pinctrl_desc {
const struct sunxi_desc_pin *pins;
int npins;
unsigned pin_base;
+ unsigned irq_banks;
};
struct sunxi_pinctrl_function {
@@ -113,8 +117,8 @@ struct sunxi_pinctrl {
unsigned nfunctions;
struct sunxi_pinctrl_group *groups;
unsigned ngroups;
- int irq;
- int irq_array[SUNXI_IRQ_NUMBER];
+ int *irq;
+ unsigned *irq_array;
spinlock_t lock;
struct pinctrl_dev *pctl_dev;
};
@@ -139,6 +143,14 @@ struct sunxi_pinctrl {
.irqnum = _irq, \
}
+#define SUNXI_FUNCTION_IRQ_BANK(_val, _bank, _irq) \
+ { \
+ .name = "irq", \
+ .muxval = _val, \
+ .irqbank = _bank, \
+ .irqnum = _irq, \
+ }
+
/*
* The sunXi PIO registers are organized as is:
* 0x00 - 0x0c Muxing values.
@@ -218,8 +230,10 @@ static inline u32 sunxi_pull_offset(u16 pin)
static inline u32 sunxi_irq_cfg_reg(u16 irq)
{
- u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04;
- return reg + IRQ_CFG_REG;
+ u8 bank = irq / IRQ_PER_BANK;
+ u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04;
+
+ return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg;
}
static inline u32 sunxi_irq_cfg_offset(u16 irq)
@@ -228,10 +242,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
return irq_num * IRQ_CFG_IRQ_BITS;
}
+static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank)
+{
+ return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE;
+}
+
static inline u32 sunxi_irq_ctrl_reg(u16 irq)
{
- u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04;
- return reg + IRQ_CTRL_REG;
+ u8 bank = irq / IRQ_PER_BANK;
+
+ return sunxi_irq_ctrl_reg_from_bank(bank);
}
static inline u32 sunxi_irq_ctrl_offset(u16 irq)
@@ -240,10 +260,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
return irq_num * IRQ_CTRL_IRQ_BITS;
}
+static inline u32 sunxi_irq_status_reg_from_bank(u8 bank)
+{
+ return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE;
+}
+
static inline u32 sunxi_irq_status_reg(u16 irq)
{
- u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04;
- return reg + IRQ_STATUS_REG;
+ u8 bank = irq / IRQ_PER_BANK;
+
+ return sunxi_irq_status_reg_from_bank(bank);
}
static inline u32 sunxi_irq_status_offset(u16 irq)
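The arithmetic behind these per-bank helpers is easiest to see with a concrete value. Below is a minimal worked sketch (illustrative only, not part of the patch), assuming just the constants visible in this hunk (IRQ_PER_BANK = 32, IRQ_MEM_SIZE = 0x20, IRQ_CFG_IRQ_PER_REG = 8) and the IRQ_CFG_REG, IRQ_CTRL_REG and IRQ_STATUS_REG bases defined earlier in this header:

/* Illustrative sketch only, not part of the patch.
 * For hwirq 37, i.e. interrupt 5 of bank 1:
 *   bank   = 37 / IRQ_PER_BANK = 1
 *   cfg    = IRQ_CFG_REG + bank * IRQ_MEM_SIZE
 *            + ((37 % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG) * 0x04
 *          = IRQ_CFG_REG + 0x20 + 0x00
 *   ctrl   = IRQ_CTRL_REG + bank * IRQ_MEM_SIZE   = IRQ_CTRL_REG + 0x20
 *   status = IRQ_STATUS_REG + bank * IRQ_MEM_SIZE = IRQ_STATUS_REG + 0x20
 * Each additional bank shifts the whole IRQ register window by
 * IRQ_MEM_SIZE (0x20); the within-bank math is unchanged from the
 * old single-bank layout.
 */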
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 2c61281bebd7..8cea355f9a81 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -141,17 +141,6 @@ static int wmt_pmx_enable(struct pinctrl_dev *pctldev,
return wmt_set_pinmux(data, func_selector, pinnum);
}
-static void wmt_pmx_disable(struct pinctrl_dev *pctldev,
- unsigned func_selector,
- unsigned group_selector)
-{
- struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
- u32 pinnum = data->pins[group_selector].number;
-
- /* disable by setting GPIO_IN */
- wmt_set_pinmux(data, WMT_FSEL_GPIO_IN, pinnum);
-}
-
static void wmt_pmx_gpio_disable_free(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned offset)
@@ -180,7 +169,6 @@ static struct pinmux_ops wmt_pinmux_ops = {
.get_function_name = wmt_pmx_get_function_name,
.get_function_groups = wmt_pmx_get_function_groups,
.enable = wmt_pmx_enable,
- .disable = wmt_pmx_disable,
.gpio_disable_free = wmt_pmx_gpio_disable_free,
.gpio_set_direction = wmt_pmx_gpio_set_direction,
};
@@ -627,8 +615,7 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
return 0;
fail_range:
- if (gpiochip_remove(&data->gpio_chip))
- dev_err(&pdev->dev, "failed to remove gpio chip\n");
+ gpiochip_remove(&data->gpio_chip);
fail_gpio:
pinctrl_unregister(data->pctl_dev);
return err;
@@ -637,12 +624,8 @@ fail_gpio:
int wmt_pinctrl_remove(struct platform_device *pdev)
{
struct wmt_pinctrl_data *data = platform_get_drvdata(pdev);
- int err;
-
- err = gpiochip_remove(&data->gpio_chip);
- if (err)
- dev_err(&pdev->dev, "failed to remove gpio chip\n");
+ gpiochip_remove(&data->gpio_chip);
pinctrl_unregister(data->pctl_dev);
return 0;
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 7f1a2e2711bd..d866db80b4fd 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -37,6 +37,8 @@
#define ISL_ALS_I2C_ADDR 0x44
#define TAOS_ALS_I2C_ADDR 0x29
+#define MAX_I2C_DEVICE_DEFERRALS 5
+
static struct i2c_client *als;
static struct i2c_client *tp;
static struct i2c_client *ts;
@@ -45,6 +47,8 @@ static const char *i2c_adapter_names[] = {
"SMBus I801 adapter",
"i915 gmbus vga",
"i915 gmbus panel",
+ "i2c-designware-pci",
+ "i2c-designware-pci",
};
/* Keep this enum consistent with i2c_adapter_names */
@@ -52,11 +56,21 @@ enum i2c_adapter_type {
I2C_ADAPTER_SMBUS = 0,
I2C_ADAPTER_VGADDC,
I2C_ADAPTER_PANEL,
+ I2C_ADAPTER_DESIGNWARE_0,
+ I2C_ADAPTER_DESIGNWARE_1,
+};
+
+enum i2c_peripheral_state {
+ UNPROBED = 0,
+ PROBED,
+ TIMEDOUT,
};
struct i2c_peripheral {
int (*add)(enum i2c_adapter_type type);
enum i2c_adapter_type type;
+ enum i2c_peripheral_state state;
+ int tries;
};
#define MAX_I2C_PERIPHERALS 3
@@ -97,8 +111,6 @@ static struct mxt_platform_data atmel_224s_tp_platform_data = {
.irqflags = IRQF_TRIGGER_FALLING,
.t19_num_keys = ARRAY_SIZE(mxt_t19_keys),
.t19_keymap = mxt_t19_keys,
- .config = NULL,
- .config_length = 0,
};
static struct i2c_board_info atmel_224s_tp_device = {
@@ -109,8 +121,6 @@ static struct i2c_board_info atmel_224s_tp_device = {
static struct mxt_platform_data atmel_1664s_platform_data = {
.irqflags = IRQF_TRIGGER_FALLING,
- .config = NULL,
- .config_length = 0,
};
static struct i2c_board_info atmel_1664s_device = {
@@ -162,8 +172,8 @@ static struct i2c_client *__add_probed_i2c_device(
/* add the i2c device */
client = i2c_new_probed_device(adapter, info, addrs, NULL);
if (!client)
- pr_err("%s failed to register device %d-%02x\n",
- __func__, bus, info->addr);
+ pr_notice("%s failed to register device %d-%02x\n",
+ __func__, bus, info->addr);
else
pr_debug("%s added i2c device %d-%02x\n",
__func__, bus, info->addr);
@@ -172,29 +182,43 @@ static struct i2c_client *__add_probed_i2c_device(
return client;
}
+struct i2c_lookup {
+ const char *name;
+ int instance;
+ int n;
+};
+
static int __find_i2c_adap(struct device *dev, void *data)
{
- const char *name = data;
+ struct i2c_lookup *lookup = data;
static const char *prefix = "i2c-";
struct i2c_adapter *adapter;
+
if (strncmp(dev_name(dev), prefix, strlen(prefix)) != 0)
return 0;
adapter = to_i2c_adapter(dev);
- return (strncmp(adapter->name, name, strlen(name)) == 0);
+ if (strncmp(adapter->name, lookup->name, strlen(lookup->name)) == 0 &&
+ lookup->n++ == lookup->instance)
+ return 1;
+ return 0;
}
static int find_i2c_adapter_num(enum i2c_adapter_type type)
{
struct device *dev = NULL;
struct i2c_adapter *adapter;
- const char *name = i2c_adapter_names[type];
+ struct i2c_lookup lookup;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.name = i2c_adapter_names[type];
+ lookup.instance = (type == I2C_ADAPTER_DESIGNWARE_1) ? 1 : 0;
+
/* find the adapter by name */
- dev = bus_find_device(&i2c_bus_type, NULL, (void *)name,
- __find_i2c_adap);
+ dev = bus_find_device(&i2c_bus_type, NULL, &lookup, __find_i2c_adap);
if (!dev) {
/* Adapters may appear later. Deferred probing will retry */
pr_notice("%s: i2c adapter %s not found on system.\n", __func__,
- name);
+ lookup.name);
return -ENODEV;
}
adapter = to_i2c_adapter(dev);
@@ -231,6 +255,7 @@ static struct i2c_client *add_i2c_device(const char *name,
struct i2c_board_info *info)
{
const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
+
return __add_probed_i2c_device(name,
find_i2c_adapter_num(type),
info,
@@ -328,9 +353,36 @@ static int chromeos_laptop_probe(struct platform_device *pdev)
if (i2c_dev->add == NULL)
break;
- /* Add the device. Set -EPROBE_DEFER on any failure */
- if (i2c_dev->add(i2c_dev->type))
+ if (i2c_dev->state == TIMEDOUT || i2c_dev->state == PROBED)
+ continue;
+
+ /*
+ * Check that the i2c adapter is present.
+ * -EPROBE_DEFER if missing as the adapter may appear much
+ * later.
+ */
+ if (find_i2c_adapter_num(i2c_dev->type) == -ENODEV) {
ret = -EPROBE_DEFER;
+ continue;
+ }
+
+ /* Add the device. */
+ if (i2c_dev->add(i2c_dev->type) == -EAGAIN) {
+ /*
+ * Set -EPROBE_DEFER a limited num of times
+ * if device is not successfully added.
+ */
+ if (++i2c_dev->tries < MAX_I2C_DEVICE_DEFERRALS) {
+ ret = -EPROBE_DEFER;
+ } else {
+ /* Ran out of tries. */
+ pr_notice("%s: Ran out of tries for device.\n",
+ __func__);
+ i2c_dev->state = TIMEDOUT;
+ }
+ } else {
+ i2c_dev->state = PROBED;
+ }
}
return ret;
@@ -363,6 +415,27 @@ static struct chromeos_laptop chromebook_pixel = {
},
};
+static struct chromeos_laptop hp_chromebook_14 = {
+ .i2c_peripherals = {
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+ },
+};
+
+static struct chromeos_laptop dell_chromebook_11 = {
+ .i2c_peripherals = {
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+ },
+};
+
+static struct chromeos_laptop toshiba_cb35 = {
+ .i2c_peripherals = {
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+ },
+};
+
static struct chromeos_laptop acer_c7_chromebook = {
.i2c_peripherals = {
/* Touchpad. */
@@ -377,6 +450,17 @@ static struct chromeos_laptop acer_ac700 = {
},
};
+static struct chromeos_laptop acer_c720 = {
+ .i2c_peripherals = {
+ /* Touchscreen. */
+ { .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
+ /* Touchpad. */
+ { .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+ /* Light Sensor. */
+ { .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 },
+ },
+};
+
static struct chromeos_laptop hp_pavilion_14_chromebook = {
.i2c_peripherals = {
/* Touchpad. */
@@ -420,6 +504,30 @@ static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
_CBDD(chromebook_pixel),
},
{
+ .ident = "Wolf",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Wolf"),
+ },
+ _CBDD(dell_chromebook_11),
+ },
+ {
+ .ident = "HP Chromebook 14",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Falco"),
+ },
+ _CBDD(hp_chromebook_14),
+ },
+ {
+ .ident = "Toshiba CB35",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Leon"),
+ },
+ _CBDD(toshiba_cb35),
+ },
+ {
.ident = "Acer C7 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
@@ -434,6 +542,13 @@ static struct dmi_system_id chromeos_laptop_dmi_table[] __initdata = {
_CBDD(acer_ac700),
},
{
+ .ident = "Acer C720",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
+ },
+ _CBDD(acer_c720),
+ },
+ {
.ident = "HP Pavilion 14 Chromebook",
.matches = {
DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
@@ -464,6 +579,7 @@ static struct platform_driver cros_platform_driver = {
static int __init chromeos_laptop_init(void)
{
int ret;
+
if (!dmi_check_system(chromeos_laptop_dmi_table)) {
pr_debug("%s unsupported system.\n", __func__);
return -ENODEV;
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
index e0e0e65cf442..34749200e4ab 100644
--- a/drivers/platform/chrome/chromeos_pstore.c
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -16,23 +16,13 @@
static struct dmi_system_id chromeos_pstore_dmi_table[] __initdata = {
{
/*
- * Today all Chromebooks/boxes ship with GOOGLE as vendor and
+ * Today all Chromebooks/boxes ship with Google_* as version and
* coreboot as bios vendor. No other systems with this
* combination are known to date.
*/
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
- DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
- },
- },
- {
- /*
- * The first Samsung Chromebox and Chromebook Series 5 550 use
- * coreboot but with Samsung as the system vendor.
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ DMI_MATCH(DMI_BIOS_VERSION, "Google_"),
},
},
{
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index 5413f62d2e61..28d12bda3ac1 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -46,13 +46,7 @@ static int samsungq10_bl_set_intensity(struct backlight_device *bd)
return 0;
}
-static int samsungq10_bl_get_intensity(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static const struct backlight_ops samsungq10_bl_ops = {
- .get_brightness = samsungq10_bl_get_intensity,
.update_status = samsungq10_bl_set_intensity,
};
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index a5c6cb773e5f..d2b780aade89 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -67,8 +67,8 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
pnp_dbg(&dev->dev, "set resources\n");
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ acpi_dev = ACPI_COMPANION(&dev->dev);
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
}
@@ -76,6 +76,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
if (WARN_ON_ONCE(acpi_dev != dev->data))
dev->data = acpi_dev;
+ handle = acpi_dev->handle;
if (acpi_has_method(handle, METHOD_NAME__SRS)) {
struct acpi_buffer buffer;
@@ -93,8 +94,8 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
}
kfree(buffer.pointer);
}
- if (!ret && acpi_bus_power_manageable(handle))
- ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
+ if (!ret && acpi_device_power_manageable(acpi_dev))
+ ret = acpi_device_set_power(acpi_dev, ACPI_STATE_D0);
return ret;
}
@@ -102,23 +103,22 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
static int pnpacpi_disable_resources(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
- acpi_handle handle;
acpi_status status;
dev_dbg(&dev->dev, "disable resources\n");
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ acpi_dev = ACPI_COMPANION(&dev->dev);
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
}
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
- if (acpi_bus_power_manageable(handle))
- acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
+ if (acpi_device_power_manageable(acpi_dev))
+ acpi_device_set_power(acpi_dev, ACPI_STATE_D3_COLD);
- /* continue even if acpi_bus_set_power() fails */
- status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+ /* continue even if acpi_device_set_power() fails */
+ status = acpi_evaluate_object(acpi_dev->handle, "_DIS", NULL, NULL);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
return -ENODEV;
@@ -128,26 +128,22 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
#ifdef CONFIG_ACPI_SLEEP
static bool pnpacpi_can_wakeup(struct pnp_dev *dev)
{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
+ struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return false;
}
- return acpi_bus_can_wakeup(handle);
+ return acpi_bus_can_wakeup(acpi_dev->handle);
}
static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
{
- struct acpi_device *acpi_dev;
- acpi_handle handle;
+ struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
int error = 0;
- handle = ACPI_HANDLE(&dev->dev);
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return 0;
}
@@ -159,7 +155,7 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
return error;
}
- if (acpi_bus_power_manageable(handle)) {
+ if (acpi_device_power_manageable(acpi_dev)) {
int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL,
ACPI_STATE_D3_COLD);
if (power_state < 0)
@@ -167,12 +163,12 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
ACPI_STATE_D0 : ACPI_STATE_D3_COLD;
/*
- * acpi_bus_set_power() often fails (keyboard port can't be
+ * acpi_device_set_power() can fail (keyboard port can't be
* powered-down?), and in any case, our return value is ignored
* by pnp_bus_suspend(). Hence we don't revert the wakeup
* setting if the set_power fails.
*/
- error = acpi_bus_set_power(handle, power_state);
+ error = acpi_device_set_power(acpi_dev, power_state);
}
return error;
@@ -180,11 +176,10 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
static int pnpacpi_resume(struct pnp_dev *dev)
{
- struct acpi_device *acpi_dev;
- acpi_handle handle = ACPI_HANDLE(&dev->dev);
+ struct acpi_device *acpi_dev = ACPI_COMPANION(&dev->dev);
int error = 0;
- if (!handle || acpi_bus_get_device(handle, &acpi_dev)) {
+ if (!acpi_dev) {
dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__);
return -ENODEV;
}
@@ -192,8 +187,8 @@ static int pnpacpi_resume(struct pnp_dev *dev)
if (device_may_wakeup(&dev->dev))
acpi_pm_device_sleep_wake(&dev->dev, false);
- if (acpi_bus_power_manageable(handle))
- error = acpi_bus_set_power(handle, ACPI_STATE_D0);
+ if (acpi_device_power_manageable(acpi_dev))
+ error = acpi_device_set_power(acpi_dev, ACPI_STATE_D0);
return error;
}
@@ -295,9 +290,11 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
return error;
}
+ error = acpi_bind_one(&dev->dev, device);
+
num++;
- return 0;
+ return error;
}
static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
@@ -313,40 +310,6 @@ static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
return AE_OK;
}
-static int __init acpi_pnp_match(struct device *dev, void *_pnp)
-{
- struct acpi_device *acpi = to_acpi_device(dev);
- struct pnp_dev *pnp = _pnp;
-
- /* true means it matched */
- return pnp->data == acpi;
-}
-
-static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
-{
- dev = bus_find_device(&acpi_bus_type, NULL, to_pnp_dev(dev),
- acpi_pnp_match);
- if (!dev)
- return NULL;
-
- put_device(dev);
- return to_acpi_device(dev);
-}
-
-/* complete initialization of a PNPACPI device includes having
- * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
- */
-static bool acpi_pnp_bus_match(struct device *dev)
-{
- return dev->bus == &pnp_bus_type;
-}
-
-static struct acpi_bus_type __initdata acpi_pnp_bus = {
- .name = "PNP",
- .match = acpi_pnp_bus_match,
- .find_companion = acpi_pnp_find_companion,
-};
-
int pnpacpi_disabled __initdata;
static int __init pnpacpi_init(void)
{
@@ -356,10 +319,8 @@ static int __init pnpacpi_init(void)
}
printk(KERN_INFO "pnp: PnP ACPI init\n");
pnp_register_protocol(&pnpacpi_protocol);
- register_acpi_bus_type(&acpi_pnp_bus);
acpi_get_devices(NULL, pnpacpi_add_device_handler, NULL, NULL);
printk(KERN_INFO "pnp: PnP ACPI: found %d devices\n", num);
- unregister_acpi_bus_type(&acpi_pnp_bus);
pnp_platform_devices = 1;
return 0;
}
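The pnpacpi hunks above replace the ACPI_HANDLE()/acpi_bus_get_device() lookups with the cached ACPI_COMPANION() pointer and switch from the handle-based acpi_bus_* power helpers to the struct-acpi_device-based acpi_device_* ones. A minimal sketch of the resulting consumer pattern, using a hypothetical example_set_d0() helper rather than the driver's own functions:

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Sketch: resolve the ACPI companion once, then use device-based PM helpers. */
static int example_set_d0(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)		/* no ACPI companion bound to this device */
		return -ENODEV;

	if (acpi_device_power_manageable(adev))
		return acpi_device_set_power(adev, ACPI_STATE_D0);

	return 0;
}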
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index ba6975123071..73cfcdf28a36 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -137,6 +137,13 @@ config BATTERY_COLLIE
Say Y to enable support for the battery on the Sharp Zaurus
SL-5500 (collie) models.
+config BATTERY_IPAQ_MICRO
+ tristate "iPAQ Atmel Micro ASIC battery driver"
+ depends on MFD_IPAQ_MICRO
+ help
+ Choose this option if you want to monitor battery status on
+ Compaq/HP iPAQ h3100 and h3600.
+
config BATTERY_WM97XX
bool "WM97xx generic battery driver"
depends on TOUCHSCREEN_WM97XX=y
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index ee54a3e4c90a..dfa894273926 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
+obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index 79a37f6d3307..e384844a1ae1 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -840,8 +840,7 @@ static int bq2415x_notifier_call(struct notifier_block *nb,
if (bq->automode < 1)
return NOTIFY_OK;
- sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
- bq2415x_set_mode(bq, bq->reported_mode);
+ schedule_delayed_work(&bq->work, 0);
return NOTIFY_OK;
}
@@ -892,6 +891,11 @@ static void bq2415x_timer_work(struct work_struct *work)
int error;
int boost;
+ if (bq->automode > 0 && (bq->reported_mode != bq->mode)) {
+ sysfs_notify(&bq->charger.dev->kobj, NULL, "reported_mode");
+ bq2415x_set_mode(bq, bq->reported_mode);
+ }
+
if (!bq->autotimer)
return;
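The bq2415x change above moves the sysfs_notify()/bq2415x_set_mode() pair out of the power-supply notifier and into the existing delayed work, so the notifier only schedules the work. A minimal sketch of that deferral pattern, with hypothetical example_* names instead of the driver's structures:

#include <linux/notifier.h>
#include <linux/workqueue.h>

/* Hypothetical state; the real driver keeps this in struct bq2415x_device. */
struct example_chg {
	struct notifier_block nb;
	struct delayed_work work;
	int mode;
	int reported_mode;
};

/* Notifier context: do no real work here, just kick the worker. */
static int example_notifier_call(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct example_chg *chg = container_of(nb, struct example_chg, nb);

	schedule_delayed_work(&chg->work, 0);
	return NOTIFY_OK;
}

static void example_work(struct work_struct *work)
{
	struct example_chg *chg =
		container_of(work, struct example_chg, work.work);

	if (chg->reported_mode != chg->mode) {
		/* apply the new mode and notify user space from here */
	}
}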
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index b309713b63bc..e10763e3a1d5 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -25,6 +25,7 @@
* http://www.ti.com/product/bq27425-g1
*/
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/jiffies.h>
@@ -415,6 +416,9 @@ static void bq27x00_update(struct bq27x00_device_info *di)
bool is_bq27425 = di->chip == BQ27425;
cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, !is_bq27500);
+ if ((cache.flags & 0xff) == 0xff)
+ /* read error */
+ cache.flags = -1;
if (cache.flags >= 0) {
if (!is_bq27500 && !is_bq27425
&& (cache.flags & BQ27000_FLAG_CI)) {
@@ -804,7 +808,7 @@ static int bq27x00_battery_probe(struct i2c_client *client,
goto batt_failed_1;
}
- di = kzalloc(sizeof(*di), GFP_KERNEL);
+ di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
if (!di) {
dev_err(&client->dev, "failed to allocate device info data\n");
retval = -ENOMEM;
@@ -819,14 +823,12 @@ static int bq27x00_battery_probe(struct i2c_client *client,
retval = bq27x00_powersupply_init(di);
if (retval)
- goto batt_failed_3;
+ goto batt_failed_2;
i2c_set_clientdata(client, di);
return 0;
-batt_failed_3:
- kfree(di);
batt_failed_2:
kfree(name);
batt_failed_1:
@@ -849,8 +851,6 @@ static int bq27x00_battery_remove(struct i2c_client *client)
idr_remove(&battery_id, di->id);
mutex_unlock(&battery_mutex);
- kfree(di);
-
return 0;
}
@@ -933,7 +933,6 @@ static int bq27000_battery_probe(struct platform_device *pdev)
{
struct bq27x00_device_info *di;
struct bq27000_platform_data *pdata = pdev->dev.platform_data;
- int ret;
if (!pdata) {
dev_err(&pdev->dev, "no platform_data supplied\n");
@@ -945,7 +944,7 @@ static int bq27000_battery_probe(struct platform_device *pdev)
return -EINVAL;
}
- di = kzalloc(sizeof(*di), GFP_KERNEL);
+ di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
if (!di) {
dev_err(&pdev->dev, "failed to allocate device info data\n");
return -ENOMEM;
@@ -959,16 +958,7 @@ static int bq27000_battery_probe(struct platform_device *pdev)
di->bat.name = pdata->name ?: dev_name(&pdev->dev);
di->bus.read = &bq27000_read_platform;
- ret = bq27x00_powersupply_init(di);
- if (ret)
- goto err_free;
-
- return 0;
-
-err_free:
- kfree(di);
-
- return ret;
+ return bq27x00_powersupply_init(di);
}
static int bq27000_battery_remove(struct platform_device *pdev)
@@ -977,8 +967,6 @@ static int bq27000_battery_remove(struct platform_device *pdev)
bq27x00_powersupply_unregister(di);
- kfree(di);
-
return 0;
}
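The bq27x00 probe paths above switch from kzalloc()/kfree() to devm_kzalloc(), which is why the error labels and the remove() paths can drop their kfree() calls: the allocation is released automatically when the device is unbound. A minimal sketch of the pattern, with a hypothetical example_probe():

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_info {
	int id;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_info *di;

	/* Freed automatically on unbind -- no kfree() in error paths or remove(). */
	di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
	if (!di)
		return -ENOMEM;

	platform_set_drvdata(pdev, di);
	return 0;
}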
diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
new file mode 100644
index 000000000000..9d694605cdb7
--- /dev/null
+++ b/drivers/power/ipaq_micro_battery.c
@@ -0,0 +1,290 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * h3xxx atmel micro companion support, battery subdevice
+ * based on previous kernel 2.4 version
+ * Author : Alessandro Gardich <gremlin@gremlin.it>
+ * Author : Linus Walleij <linus.walleij@linaro.org>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/ipaq-micro.h>
+#include <linux/power_supply.h>
+#include <linux/workqueue.h>
+
+#define BATT_PERIOD 100000 /* 100 seconds in milliseconds */
+
+#define MICRO_BATT_CHEM_ALKALINE 0x01
+#define MICRO_BATT_CHEM_NICD 0x02
+#define MICRO_BATT_CHEM_NIMH 0x03
+#define MICRO_BATT_CHEM_LION 0x04
+#define MICRO_BATT_CHEM_LIPOLY 0x05
+#define MICRO_BATT_CHEM_NOT_INSTALLED 0x06
+#define MICRO_BATT_CHEM_UNKNOWN 0xff
+
+#define MICRO_BATT_STATUS_HIGH 0x01
+#define MICRO_BATT_STATUS_LOW 0x02
+#define MICRO_BATT_STATUS_CRITICAL 0x04
+#define MICRO_BATT_STATUS_CHARGING 0x08
+#define MICRO_BATT_STATUS_CHARGEMAIN 0x10
+#define MICRO_BATT_STATUS_DEAD 0x20 /* Battery will not charge */
+#define MICRO_BATT_STATUS_NOTINSTALLED 0x20 /* For expansion pack batteries */
+#define MICRO_BATT_STATUS_FULL 0x40 /* Battery fully charged */
+#define MICRO_BATT_STATUS_NOBATTERY 0x80
+#define MICRO_BATT_STATUS_UNKNOWN 0xff
+
+struct micro_battery {
+ struct ipaq_micro *micro;
+ struct workqueue_struct *wq;
+ struct delayed_work update;
+ u8 ac;
+ u8 chemistry;
+ unsigned int voltage;
+ u16 temperature;
+ u8 flag;
+};
+
+static void micro_battery_work(struct work_struct *work)
+{
+ struct micro_battery *mb = container_of(work,
+ struct micro_battery, update.work);
+ struct ipaq_micro_msg msg_battery = {
+ .id = MSG_BATTERY,
+ };
+ struct ipaq_micro_msg msg_sensor = {
+ .id = MSG_THERMAL_SENSOR,
+ };
+
+ /* First send battery message */
+ ipaq_micro_tx_msg_sync(mb->micro, &msg_battery);
+ if (msg_battery.rx_len < 4)
+ pr_info("battery message too short\n");
+
+ /*
+ * Returned message format:
+ * byte 0: 0x00 = Not plugged in
+ * 0x01 = AC adapter plugged in
+ * byte 1: chemistry
+ * byte 2: voltage LSB
+ * byte 3: voltage MSB
+ * byte 4: flags
+ * byte 5-9: same for battery 2
+ */
+ mb->ac = msg_battery.rx_data[0];
+ mb->chemistry = msg_battery.rx_data[1];
+ mb->voltage = ((((unsigned short)msg_battery.rx_data[3] << 8) +
+ msg_battery.rx_data[2]) * 5000L) * 1000 / 1024;
+ mb->flag = msg_battery.rx_data[4];
+
+ if (msg_battery.rx_len == 9)
+ pr_debug("second battery ignored\n");
+
+ /* Then read the sensor */
+ ipaq_micro_tx_msg_sync(mb->micro, &msg_sensor);
+ mb->temperature = msg_sensor.rx_data[1] << 8 | msg_sensor.rx_data[0];
+
+ queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD));
+}
+
+static int get_capacity(struct power_supply *b)
+{
+ struct micro_battery *mb = dev_get_drvdata(b->dev->parent);
+
+ switch (mb->flag & 0x07) {
+ case MICRO_BATT_STATUS_HIGH:
+ return 100;
+ break;
+ case MICRO_BATT_STATUS_LOW:
+ return 50;
+ break;
+ case MICRO_BATT_STATUS_CRITICAL:
+ return 5;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int get_status(struct power_supply *b)
+{
+ struct micro_battery *mb = dev_get_drvdata(b->dev->parent);
+
+ if (mb->flag == MICRO_BATT_STATUS_UNKNOWN)
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+
+ if (mb->flag & MICRO_BATT_STATUS_FULL)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ if ((mb->flag & MICRO_BATT_STATUS_CHARGING) ||
+ (mb->flag & MICRO_BATT_STATUS_CHARGEMAIN))
+ return POWER_SUPPLY_STATUS_CHARGING;
+
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+}
+
+static int micro_batt_get_property(struct power_supply *b,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct micro_battery *mb = dev_get_drvdata(b->dev->parent);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ switch (mb->chemistry) {
+ case MICRO_BATT_CHEM_NICD:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_NiCd;
+ break;
+ case MICRO_BATT_CHEM_NIMH:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_NiMH;
+ break;
+ case MICRO_BATT_CHEM_LION:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case MICRO_BATT_CHEM_LIPOLY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LIPO;
+ break;
+ default:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
+ break;
+ };
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = get_status(b);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = 4700000;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_capacity(b);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = mb->temperature;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = mb->voltage;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ return 0;
+}
+
+static int micro_ac_get_property(struct power_supply *b,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct micro_battery *mb = dev_get_drvdata(b->dev->parent);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = mb->ac;
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ return 0;
+}
+
+static enum power_supply_property micro_batt_power_props[] = {
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
+static struct power_supply micro_batt_power = {
+ .name = "main-battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = micro_batt_power_props,
+ .num_properties = ARRAY_SIZE(micro_batt_power_props),
+ .get_property = micro_batt_get_property,
+ .use_for_apm = 1,
+};
+
+static enum power_supply_property micro_ac_power_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static struct power_supply micro_ac_power = {
+ .name = "ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = micro_ac_power_props,
+ .num_properties = ARRAY_SIZE(micro_ac_power_props),
+ .get_property = micro_ac_get_property,
+};
+
+static int micro_batt_probe(struct platform_device *pdev)
+{
+ struct micro_battery *mb;
+
+ mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
+ if (!mb)
+ return -ENOMEM;
+
+ mb->micro = dev_get_drvdata(pdev->dev.parent);
+ mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
+ INIT_DELAYED_WORK(&mb->update, micro_battery_work);
+ platform_set_drvdata(pdev, mb);
+ queue_delayed_work(mb->wq, &mb->update, 1);
+ power_supply_register(&pdev->dev, &micro_batt_power);
+ power_supply_register(&pdev->dev, &micro_ac_power);
+
+ dev_info(&pdev->dev, "iPAQ micro battery driver\n");
+ return 0;
+}
+
+static int micro_batt_remove(struct platform_device *pdev)
+{
+ struct micro_battery *mb = platform_get_drvdata(pdev);
+
+ power_supply_unregister(&micro_ac_power);
+ power_supply_unregister(&micro_batt_power);
+ cancel_delayed_work_sync(&mb->update);
+
+ return 0;
+}
+
+static int micro_batt_suspend(struct device *dev)
+{
+ struct micro_battery *mb = dev_get_drvdata(dev);
+
+ cancel_delayed_work_sync(&mb->update);
+ return 0;
+}
+
+static int micro_batt_resume(struct device *dev)
+{
+ struct micro_battery *mb = dev_get_drvdata(dev);
+
+ queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD));
+ return 0;
+}
+
+static const struct dev_pm_ops micro_batt_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(micro_batt_suspend, micro_batt_resume)
+};
+
+static struct platform_driver micro_batt_device_driver = {
+ .driver = {
+ .name = "ipaq-micro-battery",
+ .pm = &micro_batt_dev_pm_ops,
+ },
+ .probe = micro_batt_probe,
+ .remove = micro_batt_remove,
+};
+module_platform_driver(micro_batt_device_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("driver for iPAQ Atmel micro battery");
+MODULE_ALIAS("platform:battery-ipaq-micro");
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 5a5a24e7d43c..078afd61490d 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -537,7 +537,8 @@ static void psy_unregister_cooler(struct power_supply *psy)
}
#endif
-int __power_supply_register(struct device *parent, struct power_supply *psy, bool ws)
+static int __power_supply_register(struct device *parent,
+ struct power_supply *psy, bool ws)
{
struct device *dev;
int rc;
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 44420d1e9094..750a20275664 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -167,6 +167,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(constant_charge_voltage_max),
POWER_SUPPLY_ATTR(charge_control_limit),
POWER_SUPPLY_ATTR(charge_control_limit_max),
+ POWER_SUPPLY_ATTR(input_current_limit),
POWER_SUPPLY_ATTR(energy_full_design),
POWER_SUPPLY_ATTR(energy_empty_design),
POWER_SUPPLY_ATTR(energy_full),
@@ -178,6 +179,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(capacity_alert_max),
POWER_SUPPLY_ATTR(capacity_level),
POWER_SUPPLY_ATTR(temp),
+ POWER_SUPPLY_ATTR(temp_max),
+ POWER_SUPPLY_ATTR(temp_min),
POWER_SUPPLY_ATTR(temp_alert_min),
POWER_SUPPLY_ATTR(temp_alert_max),
POWER_SUPPLY_ATTR(temp_ambient),
@@ -189,6 +192,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(time_to_full_avg),
POWER_SUPPLY_ATTR(type),
POWER_SUPPLY_ATTR(scope),
+ POWER_SUPPLY_ATTR(charge_term_current),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index bdcf5173e377..ca41523bbebf 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -20,6 +20,17 @@ config POWER_RESET_AXXIA
Say Y if you have an Axxia family SoC.
+config POWER_RESET_BRCMSTB
+ bool "Broadcom STB reset driver" if COMPILE_TEST
+ depends on POWER_RESET && ARM
+ default ARCH_BRCMSTB
+ help
+ This driver provides restart support for ARM-based Broadcom STB
+ boards.
+
+ Say Y here if you have an ARM-based Broadcom STB board and you wish
+ to have restart support.
+
config POWER_RESET_GPIO
bool "GPIO power-off driver"
depends on OF_GPIO && POWER_RESET
@@ -28,6 +39,12 @@ config POWER_RESET_GPIO
If your board needs a GPIO high/low to power down, say Y and
create a binding in your devicetree.
+config POWER_RESET_HISI
+ bool "Hisilicon power-off driver"
+ depends on POWER_RESET && ARCH_HISI
+ help
+ Reboot support for Hisilicon boards.
+
config POWER_RESET_MSM
bool "Qualcomm MSM power-off driver"
depends on POWER_RESET && ARCH_QCOM
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index dde2e8bbac53..a42e70edd037 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -1,6 +1,8 @@
obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o
obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o
+obj-$(CONFIG_POWER_RESET_BRCMSTB) += brcmstb-reboot.o
obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
+obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
diff --git a/drivers/power/reset/brcmstb-reboot.c b/drivers/power/reset/brcmstb-reboot.c
new file mode 100644
index 000000000000..3f236924742a
--- /dev/null
+++ b/drivers/power/reset/brcmstb-reboot.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2013 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/regmap.h>
+#include <linux/smp.h>
+#include <linux/mfd/syscon.h>
+
+#include <asm/system_misc.h>
+
+#define RESET_SOURCE_ENABLE_REG 1
+#define SW_MASTER_RESET_REG 2
+
+static struct regmap *regmap;
+static u32 rst_src_en;
+static u32 sw_mstr_rst;
+
+static void brcmstb_reboot(enum reboot_mode mode, const char *cmd)
+{
+ int rc;
+ u32 tmp;
+
+ rc = regmap_write(regmap, rst_src_en, 1);
+ if (rc) {
+ pr_err("failed to write rst_src_en (%d)\n", rc);
+ return;
+ }
+
+ rc = regmap_read(regmap, rst_src_en, &tmp);
+ if (rc) {
+ pr_err("failed to read rst_src_en (%d)\n", rc);
+ return;
+ }
+
+ rc = regmap_write(regmap, sw_mstr_rst, 1);
+ if (rc) {
+ pr_err("failed to write sw_mstr_rst (%d)\n", rc);
+ return;
+ }
+
+ rc = regmap_read(regmap, sw_mstr_rst, &tmp);
+ if (rc) {
+ pr_err("failed to read sw_mstr_rst (%d)\n", rc);
+ return;
+ }
+
+ while (1)
+ ;
+}
+
+static int brcmstb_reboot_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct device_node *np = pdev->dev.of_node;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "syscon");
+ if (IS_ERR(regmap)) {
+ pr_err("failed to get syscon phandle\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_index(np, "syscon", RESET_SOURCE_ENABLE_REG,
+ &rst_src_en);
+ if (rc) {
+ pr_err("can't get rst_src_en offset (%d)\n", rc);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_index(np, "syscon", SW_MASTER_RESET_REG,
+ &sw_mstr_rst);
+ if (rc) {
+ pr_err("can't get sw_mstr_rst offset (%d)\n", rc);
+ return -EINVAL;
+ }
+
+ arm_pm_restart = brcmstb_reboot;
+
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "brcm,brcmstb-reboot", },
+ {},
+};
+
+static struct platform_driver brcmstb_reboot_driver = {
+ .probe = brcmstb_reboot_probe,
+ .driver = {
+ .name = "brcmstb-reboot",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match,
+ },
+};
+
+static int __init brcmstb_reboot_init(void)
+{
+ return platform_driver_probe(&brcmstb_reboot_driver,
+ brcmstb_reboot_probe);
+}
+subsys_initcall(brcmstb_reboot_init);
diff --git a/drivers/power/reset/gpio-poweroff.c b/drivers/power/reset/gpio-poweroff.c
index e290d48ddd99..ce849bc9b269 100644
--- a/drivers/power/reset/gpio-poweroff.c
+++ b/drivers/power/reset/gpio-poweroff.c
@@ -15,31 +15,29 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/module.h>
/*
* Hold configuration here, cannot be more than one instance of the driver
* since pm_power_off itself is global.
*/
-static int gpio_num = -1;
-static int gpio_active_low;
+static struct gpio_desc *reset_gpio;
static void gpio_poweroff_do_poweroff(void)
{
- BUG_ON(!gpio_is_valid(gpio_num));
+ BUG_ON(!reset_gpio);
/* drive it active, also inactive->active edge */
- gpio_direction_output(gpio_num, !gpio_active_low);
+ gpiod_direction_output(reset_gpio, 1);
mdelay(100);
/* drive inactive, also active->inactive edge */
- gpio_set_value(gpio_num, gpio_active_low);
+ gpiod_set_value(reset_gpio, 0);
mdelay(100);
/* drive it active, also inactive->active edge */
- gpio_set_value(gpio_num, !gpio_active_low);
+ gpiod_set_value(reset_gpio, 1);
/* give it some time */
mdelay(3000);
@@ -49,54 +47,42 @@ static void gpio_poweroff_do_poweroff(void)
static int gpio_poweroff_probe(struct platform_device *pdev)
{
- enum of_gpio_flags flags;
bool input = false;
- int ret;
/* If a pm_power_off function has already been added, leave it alone */
if (pm_power_off != NULL) {
- pr_err("%s: pm_power_off function already registered",
+ dev_err(&pdev->dev,
+ "%s: pm_power_off function already registered",
__func__);
return -EBUSY;
}
- gpio_num = of_get_gpio_flags(pdev->dev.of_node, 0, &flags);
- if (!gpio_is_valid(gpio_num))
- return gpio_num;
-
- gpio_active_low = flags & OF_GPIO_ACTIVE_LOW;
+ reset_gpio = devm_gpiod_get(&pdev->dev, NULL);
+ if (IS_ERR(reset_gpio))
+ return PTR_ERR(reset_gpio);
input = of_property_read_bool(pdev->dev.of_node, "input");
- ret = gpio_request(gpio_num, "poweroff-gpio");
- if (ret) {
- pr_err("%s: Could not get GPIO %d", __func__, gpio_num);
- return ret;
- }
if (input) {
- if (gpio_direction_input(gpio_num)) {
- pr_err("Could not set direction of GPIO %d to input",
- gpio_num);
- goto err;
+ if (gpiod_direction_input(reset_gpio)) {
+ dev_err(&pdev->dev,
+ "Could not set direction of reset GPIO to input\n");
+ return -ENODEV;
}
} else {
- if (gpio_direction_output(gpio_num, gpio_active_low)) {
- pr_err("Could not set direction of GPIO %d", gpio_num);
- goto err;
+ if (gpiod_direction_output(reset_gpio, 0)) {
+ dev_err(&pdev->dev,
+ "Could not set direction of reset GPIO\n");
+ return -ENODEV;
}
}
pm_power_off = &gpio_poweroff_do_poweroff;
return 0;
-
-err:
- gpio_free(gpio_num);
- return -ENODEV;
}
static int gpio_poweroff_remove(struct platform_device *pdev)
{
- gpio_free(gpio_num);
if (pm_power_off == &gpio_poweroff_do_poweroff)
pm_power_off = NULL;
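The gpio-poweroff conversion above replaces the integer GPIO number plus manual active-low bookkeeping with a struct gpio_desc: gpiod_direction_output() and gpiod_set_value() take a logical level, and the polarity comes from the firmware description. A minimal sketch of the descriptor-based consumer pattern, with a hypothetical example_probe():

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct gpio_desc *gpiod;

	/* Polarity (active high/low) is taken from the DT/ACPI description. */
	gpiod = devm_gpiod_get(&pdev->dev, NULL);
	if (IS_ERR(gpiod))
		return PTR_ERR(gpiod);

	/* "1" means "asserted", whatever the physical polarity is. */
	return gpiod_direction_output(gpiod, 1);
}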
diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c
new file mode 100644
index 000000000000..0c91d0231d36
--- /dev/null
+++ b/drivers/power/reset/hisi-reboot.c
@@ -0,0 +1,67 @@
+/*
+ * Hisilicon SoC reset code
+ *
+ * Copyright (c) 2014 Hisilicon Ltd.
+ * Copyright (c) 2014 Linaro Ltd.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+
+#include <asm/proc-fns.h>
+#include <asm/system_misc.h>
+
+static void __iomem *base;
+static u32 reboot_offset;
+
+static void hisi_restart(enum reboot_mode mode, const char *cmd)
+{
+ writel_relaxed(0xdeadbeef, base + reboot_offset);
+
+ while (1)
+ cpu_do_idle();
+}
+
+static int hisi_reboot_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ WARN(1, "failed to map base address");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) {
+ pr_err("failed to find reboot-offset property\n");
+ return -EINVAL;
+ }
+
+ arm_pm_restart = hisi_restart;
+
+ return 0;
+}
+
+static struct of_device_id hisi_reboot_of_match[] = {
+ { .compatible = "hisilicon,sysctrl" },
+ {}
+};
+
+static struct platform_driver hisi_reboot_driver = {
+ .probe = hisi_reboot_probe,
+ .driver = {
+ .name = "hisi-reboot",
+ .of_match_table = hisi_reboot_of_match,
+ },
+};
+module_platform_driver(hisi_reboot_driver);
diff --git a/drivers/power/reset/restart-poweroff.c b/drivers/power/reset/restart-poweroff.c
index 5758033e0c16..3e51f8d29bfe 100644
--- a/drivers/power/reset/restart-poweroff.c
+++ b/drivers/power/reset/restart-poweroff.c
@@ -62,5 +62,5 @@ module_platform_driver(restart_poweroff_driver);
MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
MODULE_DESCRIPTION("restart poweroff driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:poweroff-restart");
diff --git a/drivers/power/rx51_battery.c b/drivers/power/rx51_battery.c
index 1bc5857b8bd5..d5a2acfb8821 100644
--- a/drivers/power/rx51_battery.c
+++ b/drivers/power/rx51_battery.c
@@ -24,34 +24,27 @@
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/i2c/twl4030-madc.h>
-
-/* RX51 specific channels */
-#define TWL4030_MADC_BTEMP_RX51 TWL4030_MADC_ADCIN0
-#define TWL4030_MADC_BCI_RX51 TWL4030_MADC_ADCIN4
+#include <linux/iio/consumer.h>
+#include <linux/of.h>
struct rx51_device_info {
struct device *dev;
struct power_supply bat;
+ struct iio_channel *channel_temp;
+ struct iio_channel *channel_bsi;
+ struct iio_channel *channel_vbat;
};
/*
* Read ADCIN channel value, code copied from maemo kernel
*/
-static int rx51_battery_read_adc(int channel)
+static int rx51_battery_read_adc(struct iio_channel *channel)
{
- struct twl4030_madc_request req;
-
- req.channels = channel;
- req.do_avg = 1;
- req.method = TWL4030_MADC_SW1;
- req.func_cb = NULL;
- req.type = TWL4030_MADC_WAIT;
- req.raw = true;
-
- if (twl4030_madc_conversion(&req) <= 0)
- return -ENODATA;
-
- return req.rbuf[ffs(channel) - 1];
+ int val, err;
+ err = iio_read_channel_average_raw(channel, &val);
+ if (err < 0)
+ return err;
+ return val;
}
/*
@@ -60,10 +53,12 @@ static int rx51_battery_read_adc(int channel)
*/
static int rx51_battery_read_voltage(struct rx51_device_info *di)
{
- int voltage = rx51_battery_read_adc(TWL4030_MADC_VBAT);
+ int voltage = rx51_battery_read_adc(di->channel_vbat);
- if (voltage < 0)
+ if (voltage < 0) {
+ dev_err(di->dev, "Could not read ADC: %d\n", voltage);
return voltage;
+ }
return 1000 * (10000 * voltage / 1705);
}
@@ -112,7 +107,10 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
{
int min = 0;
int max = ARRAY_SIZE(rx51_temp_table2) - 1;
- int raw = rx51_battery_read_adc(TWL4030_MADC_BTEMP_RX51);
+ int raw = rx51_battery_read_adc(di->channel_temp);
+
+ if (raw < 0)
+ dev_err(di->dev, "Could not read ADC: %d\n", raw);
/* Zero and negative values are undefined */
if (raw <= 0)
@@ -146,10 +144,12 @@ static int rx51_battery_read_temperature(struct rx51_device_info *di)
*/
static int rx51_battery_read_capacity(struct rx51_device_info *di)
{
- int capacity = rx51_battery_read_adc(TWL4030_MADC_BCI_RX51);
+ int capacity = rx51_battery_read_adc(di->channel_bsi);
- if (capacity < 0)
+ if (capacity < 0) {
+ dev_err(di->dev, "Could not read ADC: %d\n", capacity);
return capacity;
+ }
return 1280 * (1200 * capacity)/(1024 - capacity);
}
@@ -213,17 +213,46 @@ static int rx51_battery_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, di);
+ di->dev = &pdev->dev;
di->bat.name = dev_name(&pdev->dev);
di->bat.type = POWER_SUPPLY_TYPE_BATTERY;
di->bat.properties = rx51_battery_props;
di->bat.num_properties = ARRAY_SIZE(rx51_battery_props);
di->bat.get_property = rx51_battery_get_property;
+ di->channel_temp = iio_channel_get(di->dev, "temp");
+ if (IS_ERR(di->channel_temp)) {
+ ret = PTR_ERR(di->channel_temp);
+ goto error;
+ }
+
+ di->channel_bsi = iio_channel_get(di->dev, "bsi");
+ if (IS_ERR(di->channel_bsi)) {
+ ret = PTR_ERR(di->channel_bsi);
+ goto error_channel_temp;
+ }
+
+ di->channel_vbat = iio_channel_get(di->dev, "vbat");
+ if (IS_ERR(di->channel_vbat)) {
+ ret = PTR_ERR(di->channel_vbat);
+ goto error_channel_bsi;
+ }
+
ret = power_supply_register(di->dev, &di->bat);
if (ret)
- return ret;
+ goto error_channel_vbat;
return 0;
+
+error_channel_vbat:
+ iio_channel_release(di->channel_vbat);
+error_channel_bsi:
+ iio_channel_release(di->channel_bsi);
+error_channel_temp:
+ iio_channel_release(di->channel_temp);
+error:
+ return ret;
}
static int rx51_battery_remove(struct platform_device *pdev)
@@ -232,15 +261,28 @@ static int rx51_battery_remove(struct platform_device *pdev)
power_supply_unregister(&di->bat);
+ iio_channel_release(di->channel_vbat);
+ iio_channel_release(di->channel_bsi);
+ iio_channel_release(di->channel_temp);
+
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id n900_battery_of_match[] = {
+ {.compatible = "nokia,n900-battery", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, n900_battery_of_match);
+#endif
+
static struct platform_driver rx51_battery_driver = {
.probe = rx51_battery_probe,
.remove = rx51_battery_remove,
.driver = {
.name = "rx51-battery",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(n900_battery_of_match),
},
};
module_platform_driver(rx51_battery_driver);
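The rx51_battery changes above read the MADC channels through the generic IIO consumer interface instead of the twl4030-madc private API. A minimal sketch of the consumer calls used in the patch, with a hypothetical channel name "temp" and helper example_read_raw():

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

/* Sketch: fetch one averaged raw sample from a named IIO channel. */
static int example_read_raw(struct device *dev, int *val)
{
	struct iio_channel *chan;
	int err;

	chan = iio_channel_get(dev, "temp");	/* name from the io-channel mapping */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	err = iio_read_channel_average_raw(chan, val);
	iio_channel_release(chan);
	return err;
}

The driver itself acquires the channels once at probe time and releases them in remove(), avoiding the per-read get/release shown here.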
diff --git a/drivers/power/tps65090-charger.c b/drivers/power/tps65090-charger.c
index 1685f63b9e5d..3e8ba97c8169 100644
--- a/drivers/power/tps65090-charger.c
+++ b/drivers/power/tps65090-charger.c
@@ -17,9 +17,11 @@
*/
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -32,11 +34,15 @@
#define TPS65090_VACG BIT(1)
#define TPS65090_NOITERM BIT(5)
+#define POLL_INTERVAL (HZ * 2) /* Used when no irq */
+
struct tps65090_charger {
struct device *dev;
int ac_online;
int prev_ac_online;
int irq;
+ struct task_struct *poll_task;
+ bool passive_mode;
struct power_supply ac;
struct tps65090_platform_data *pdata;
};
@@ -49,6 +55,9 @@ static int tps65090_low_chrg_current(struct tps65090_charger *charger)
{
int ret;
+ if (charger->passive_mode)
+ return 0;
+
ret = tps65090_write(charger->dev->parent, TPS65090_REG_CG_CTRL5,
TPS65090_NOITERM);
if (ret < 0) {
@@ -64,6 +73,9 @@ static int tps65090_enable_charging(struct tps65090_charger *charger)
int ret;
uint8_t ctrl0 = 0;
+ if (charger->passive_mode)
+ return 0;
+
ret = tps65090_read(charger->dev->parent, TPS65090_REG_CG_CTRL0,
&ctrl0);
if (ret < 0) {
@@ -87,6 +99,9 @@ static int tps65090_config_charger(struct tps65090_charger *charger)
uint8_t intrmask = 0;
int ret;
+ if (charger->passive_mode)
+ return 0;
+
if (charger->pdata->enable_low_current_chrg) {
ret = tps65090_low_chrg_current(charger);
if (ret < 0) {
@@ -164,10 +179,14 @@ static irqreturn_t tps65090_charger_isr(int irq, void *dev_id)
}
/* Clear interrupts. */
- ret = tps65090_write(charger->dev->parent, TPS65090_REG_INTR_STS, 0x00);
- if (ret < 0) {
- dev_err(charger->dev, "%s(): Error in writing reg 0x%x\n",
+ if (!charger->passive_mode) {
+ ret = tps65090_write(charger->dev->parent,
+ TPS65090_REG_INTR_STS, 0x00);
+ if (ret < 0) {
+ dev_err(charger->dev,
+ "%s(): Error in writing reg 0x%x\n",
__func__, TPS65090_REG_INTR_STS);
+ }
}
if (charger->prev_ac_online != charger->ac_online)
@@ -198,6 +217,18 @@ static struct tps65090_platform_data *
}
+static int tps65090_charger_poll_task(void *data)
+{
+ set_freezable();
+
+ while (!kthread_should_stop()) {
+ schedule_timeout_interruptible(POLL_INTERVAL);
+ try_to_freeze();
+ tps65090_charger_isr(-1, data);
+ }
+ return 0;
+}
+
static int tps65090_charger_probe(struct platform_device *pdev)
{
struct tps65090_charger *cdata;
@@ -244,22 +275,10 @@ static int tps65090_charger_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_warn(&pdev->dev, "Unable to get charger irq = %d\n", irq);
- ret = irq;
- goto fail_unregister_supply;
- }
-
+ if (irq < 0)
+ irq = -ENXIO;
cdata->irq = irq;
- ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
- tps65090_charger_isr, 0, "tps65090-charger", cdata);
- if (ret) {
- dev_err(cdata->dev, "Unable to register irq %d err %d\n", irq,
- ret);
- goto fail_unregister_supply;
- }
-
ret = tps65090_config_charger(cdata);
if (ret < 0) {
dev_err(&pdev->dev, "charger config failed, err %d\n", ret);
@@ -285,6 +304,27 @@ static int tps65090_charger_probe(struct platform_device *pdev)
power_supply_changed(&cdata->ac);
}
+ if (irq != -ENXIO) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ tps65090_charger_isr, 0, "tps65090-charger", cdata);
+ if (ret) {
+ dev_err(cdata->dev,
+ "Unable to register irq %d err %d\n", irq,
+ ret);
+ goto fail_unregister_supply;
+ }
+ } else {
+ cdata->poll_task = kthread_run(tps65090_charger_poll_task,
+ cdata, "ktps65090charger");
+ cdata->passive_mode = true;
+ if (IS_ERR(cdata->poll_task)) {
+ ret = PTR_ERR(cdata->poll_task);
+ dev_err(cdata->dev,
+ "Unable to run kthread err %d\n", ret);
+ goto fail_unregister_supply;
+ }
+ }
+
return 0;
fail_unregister_supply:
@@ -297,6 +337,8 @@ static int tps65090_charger_remove(struct platform_device *pdev)
{
struct tps65090_charger *cdata = platform_get_drvdata(pdev);
+ if (cdata->irq == -ENXIO)
+ kthread_stop(cdata->poll_task);
power_supply_unregister(&cdata->ac);
return 0;
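When platform_get_irq() fails, the tps65090 charger now falls back to a freezable polling kthread that calls the same ISR every POLL_INTERVAL instead of failing the probe. A minimal sketch of that polling-thread pattern, with hypothetical example_* names and an assumed 2-second interval:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#define EXAMPLE_POLL_INTERVAL	(HZ * 2)	/* assumed 2-second poll */

static int example_poll_task(void *data)
{
	set_freezable();

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(EXAMPLE_POLL_INTERVAL);
		try_to_freeze();
		/* do the same work the interrupt handler would do, e.g. */
		/* example_isr(-1, data); */
	}
	return 0;
}

Such a thread is started with kthread_run() in probe and torn down with kthread_stop() in remove, as the hunks above do.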
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index f14108844e1a..2598c588006e 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -28,10 +28,13 @@
#define TWL4030_BCIICHG 0x08
#define TWL4030_BCIVAC 0x0a
#define TWL4030_BCIVBUS 0x0c
+#define TWL4030_BCIMFSTS3 0x0F
#define TWL4030_BCIMFSTS4 0x10
#define TWL4030_BCICTL1 0x23
#define TWL4030_BB_CFG 0x12
+#define TWL4030_BCIMFSTS1 0x01
+
#define TWL4030_BCIAUTOWEN BIT(5)
#define TWL4030_CONFIG_DONE BIT(4)
#define TWL4030_BCIAUTOUSB BIT(1)
@@ -52,6 +55,9 @@
#define TWL4030_BBISEL_500uA 0x02
#define TWL4030_BBISEL_1000uA 0x03
+#define TWL4030_BATSTSPCHG BIT(2)
+#define TWL4030_BATSTSMCHG BIT(6)
+
/* BCI interrupts */
#define TWL4030_WOVF BIT(0) /* Watchdog overflow */
#define TWL4030_TMOVF BIT(1) /* Timer overflow */
@@ -145,6 +151,35 @@ static int twl4030bci_read_adc_val(u8 reg)
}
/*
+ * Check if a battery pack is present
+ */
+static int twl4030_is_battery_present(struct twl4030_bci *bci)
+{
+ int ret;
+ u8 val = 0;
+
+ /* Battery presence in Main charge? */
+ ret = twl_i2c_read_u8(TWL_MODULE_MAIN_CHARGE, &val, TWL4030_BCIMFSTS3);
+ if (ret)
+ return ret;
+ if (val & TWL4030_BATSTSMCHG)
+ return 0;
+
+ /*
+ * OK, it could be that the bootloader did not enable the main charger;
+ * pre-charge is h/w automatic. So, check battery presence in pre-charge?
+ */
+ ret = twl_i2c_read_u8(TWL4030_MODULE_PRECHARGE, &val,
+ TWL4030_BCIMFSTS1);
+ if (ret)
+ return ret;
+ if (val & TWL4030_BATSTSPCHG)
+ return 0;
+
+ return -ENODEV;
+}
+
+/*
* Check if VBUS power is present
*/
static int twl4030_bci_have_vbus(struct twl4030_bci *bci)
@@ -541,8 +576,14 @@ static int __init twl4030_bci_probe(struct platform_device *pdev)
bci->irq_chg = platform_get_irq(pdev, 0);
bci->irq_bci = platform_get_irq(pdev, 1);
- platform_set_drvdata(pdev, bci);
+ /* Only proceed further if a battery is physically present */
+ ret = twl4030_is_battery_present(bci);
+ if (ret) {
+ dev_crit(&pdev->dev, "Battery was not detected:%d\n", ret);
+ goto fail_no_battery;
+ }
+ platform_set_drvdata(pdev, bci);
bci->ac.name = "twl4030_ac";
bci->ac.type = POWER_SUPPLY_TYPE_MAINS;
bci->ac.properties = twl4030_charger_props;
@@ -633,6 +674,7 @@ fail_chg_irq:
fail_register_usb:
power_supply_unregister(&bci->ac);
fail_register_ac:
+fail_no_battery:
kfree(bci);
return ret;
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 4ad7b89a4cb4..b800783800a3 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -43,7 +43,7 @@ config PWM_AB8500
config PWM_ATMEL
tristate "Atmel PWM support"
- depends on ARCH_AT91
+ depends on ARCH_AT91 || AVR32
help
Generic PWM framework driver for Atmel SoC.
@@ -206,6 +206,13 @@ config PWM_RENESAS_TPU
To compile this driver as a module, choose M here: the module
will be called pwm-renesas-tpu.
+config PWM_ROCKCHIP
+ tristate "Rockchip PWM support"
+ depends on ARCH_ROCKCHIP
+ help
+ Generic PWM framework driver for the PWM controller found on
+ Rockchip SoCs.
+
config PWM_SAMSUNG
tristate "Samsung PWM support"
depends on PLAT_SAMSUNG
@@ -226,6 +233,16 @@ config PWM_SPEAR
To compile this driver as a module, choose M here: the module
will be called pwm-spear.
+config PWM_STI
+ tristate "STiH4xx PWM support"
+ depends on ARCH_STI
+ depends on OF
+ help
+ Generic PWM framework driver for STiH4xx SoCs.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-sti.
+
config PWM_TEGRA
tristate "NVIDIA Tegra PWM support"
depends on ARCH_TEGRA
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 5c86a19d5d39..f8c577d41091 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -18,8 +18,10 @@ obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o
obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
+obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o
+obj-$(CONFIG_PWM_STI) += pwm-sti.o
obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o
obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o
obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index d797c7b84c3f..5449d9150d40 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -262,6 +262,7 @@ static int imx_pwm_probe(struct platform_device *pdev)
imx->chip.dev = &pdev->dev;
imx->chip.base = -1;
imx->chip.npwm = 1;
+ imx->chip.can_sleep = true;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 44ce6c6103ae..4df994f72d96 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -14,7 +14,6 @@
*/
#include <linux/acpi.h>
-#include <linux/clk.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -37,7 +36,6 @@ static int pci_drv, plat_drv; /* So we know which drivers registered */
struct pwm_lpss_chip {
struct pwm_chip chip;
void __iomem *regs;
- struct clk *clk;
unsigned long clk_rate;
};
@@ -97,11 +95,6 @@ static int pwm_lpss_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pwm_lpss_chip *lpwm = to_lpwm(chip);
u32 ctrl;
- int ret;
-
- ret = clk_prepare_enable(lpwm->clk);
- if (ret)
- return ret;
ctrl = readl(lpwm->regs + PWM);
writel(ctrl | PWM_ENABLE, lpwm->regs + PWM);
@@ -116,8 +109,6 @@ static void pwm_lpss_disable(struct pwm_chip *chip, struct pwm_device *pwm)
ctrl = readl(lpwm->regs + PWM);
writel(ctrl & ~PWM_ENABLE, lpwm->regs + PWM);
-
- clk_disable_unprepare(lpwm->clk);
}
static const struct pwm_ops pwm_lpss_ops = {
@@ -142,17 +133,7 @@ static struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev,
if (IS_ERR(lpwm->regs))
return ERR_CAST(lpwm->regs);
- if (info) {
- lpwm->clk_rate = info->clk_rate;
- } else {
- lpwm->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(lpwm->clk)) {
- dev_err(dev, "failed to get PWM clock\n");
- return ERR_CAST(lpwm->clk);
- }
- lpwm->clk_rate = clk_get_rate(lpwm->clk);
- }
-
+ lpwm->clk_rate = info->clk_rate;
lpwm->chip.dev = dev;
lpwm->chip.ops = &pwm_lpss_ops;
lpwm->chip.base = -1;
@@ -221,12 +202,19 @@ static struct pci_driver pwm_lpss_driver_pci = {
static int pwm_lpss_probe_platform(struct platform_device *pdev)
{
+ const struct pwm_lpss_boardinfo *info;
+ const struct acpi_device_id *id;
struct pwm_lpss_chip *lpwm;
struct resource *r;
+ id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpwm = pwm_lpss_probe(&pdev->dev, r, NULL);
+ info = (struct pwm_lpss_boardinfo *)id->driver_data;
+ lpwm = pwm_lpss_probe(&pdev->dev, r, info);
if (IS_ERR(lpwm))
return PTR_ERR(lpwm);
@@ -242,7 +230,7 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev)
}
static const struct acpi_device_id pwm_lpss_acpi_match[] = {
- { "80860F09", 0 },
+ { "80860F09", (unsigned long)&byt_info },
{ },
};
MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match);
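With the clk handling removed, the pwm-lpss platform (ACPI-enumerated) path must get its clock rate from board info attached to the matched ACPI ID, which is what the acpi_match_device() hunk above does. A minimal sketch of that lookup, with hypothetical example_* names (the 25 MHz rate is an assumed value for illustration):

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

struct example_info {
	unsigned long clk_rate;
};

static const struct example_info example_byt_info = {
	.clk_rate = 25000000,	/* assumed fixed input clock */
};

static const struct acpi_device_id example_acpi_match[] = {
	{ "80860F09", (kernel_ulong_t)&example_byt_info },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *id;
	const struct example_info *info;

	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
	if (!id)
		return -ENODEV;

	info = (const struct example_info *)id->driver_data;
	/* use info->clk_rate instead of a clk lookup */
	return 0;
}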
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
new file mode 100644
index 000000000000..bdd8644c01cf
--- /dev/null
+++ b/drivers/pwm/pwm-rockchip.c
@@ -0,0 +1,264 @@
+/*
+ * PWM driver for Rockchip SoCs
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ * Copyright (C) 2014 ROCKCHIP, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/time.h>
+
+#define PWM_CTRL_TIMER_EN (1 << 0)
+#define PWM_CTRL_OUTPUT_EN (1 << 3)
+
+#define PWM_ENABLE (1 << 0)
+#define PWM_CONTINUOUS (1 << 1)
+#define PWM_DUTY_POSITIVE (1 << 3)
+#define PWM_INACTIVE_NEGATIVE (0 << 4)
+#define PWM_OUTPUT_LEFT (0 << 5)
+#define PWM_LP_DISABLE (0 << 8)
+
+struct rockchip_pwm_chip {
+ struct pwm_chip chip;
+ struct clk *clk;
+ const struct rockchip_pwm_data *data;
+ void __iomem *base;
+};
+
+struct rockchip_pwm_regs {
+ unsigned long duty;
+ unsigned long period;
+ unsigned long cntr;
+ unsigned long ctrl;
+};
+
+struct rockchip_pwm_data {
+ struct rockchip_pwm_regs regs;
+ unsigned int prescaler;
+
+ void (*set_enable)(struct pwm_chip *chip, bool enable);
+};
+
+static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
+{
+ return container_of(c, struct rockchip_pwm_chip, chip);
+}
+
+static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, bool enable)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN;
+ u32 val;
+
+ val = readl_relaxed(pc->base + pc->data->regs.ctrl);
+
+ if (enable)
+ val |= enable_conf;
+ else
+ val &= ~enable_conf;
+
+ writel_relaxed(val, pc->base + pc->data->regs.ctrl);
+}
+
+static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip, bool enable)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
+ PWM_CONTINUOUS | PWM_DUTY_POSITIVE |
+ PWM_INACTIVE_NEGATIVE;
+ u32 val;
+
+ val = readl_relaxed(pc->base + pc->data->regs.ctrl);
+
+ if (enable)
+ val |= enable_conf;
+ else
+ val &= ~enable_conf;
+
+ writel_relaxed(val, pc->base + pc->data->regs.ctrl);
+}
+
+static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ unsigned long period, duty;
+ u64 clk_rate, div;
+ int ret;
+
+ clk_rate = clk_get_rate(pc->clk);
+
+ /*
+ * Since period and duty cycle registers have a width of 32
+ * bits, every possible input period can be obtained using the
+ * default prescaler value for all practical clock rate values.
+ */
+ div = clk_rate * period_ns;
+ do_div(div, pc->data->prescaler * NSEC_PER_SEC);
+ period = div;
+
+ div = clk_rate * duty_ns;
+ do_div(div, pc->data->prescaler * NSEC_PER_SEC);
+ duty = div;
+
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return ret;
+
+ writel(period, pc->base + pc->data->regs.period);
+ writel(duty, pc->base + pc->data->regs.duty);
+ writel(0, pc->base + pc->data->regs.cntr);
+
+ clk_disable(pc->clk);
+
+ return 0;
+}
+
+static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+ int ret;
+
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return ret;
+
+ pc->data->set_enable(chip, true);
+
+ return 0;
+}
+
+static void rockchip_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
+
+ pc->data->set_enable(chip, false);
+
+ clk_disable(pc->clk);
+}
+
+static const struct pwm_ops rockchip_pwm_ops = {
+ .config = rockchip_pwm_config,
+ .enable = rockchip_pwm_enable,
+ .disable = rockchip_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static const struct rockchip_pwm_data pwm_data_v1 = {
+ .regs = {
+ .duty = 0x04,
+ .period = 0x08,
+ .cntr = 0x00,
+ .ctrl = 0x0c,
+ },
+ .prescaler = 2,
+ .set_enable = rockchip_pwm_set_enable_v1,
+};
+
+static const struct rockchip_pwm_data pwm_data_v2 = {
+ .regs = {
+ .duty = 0x08,
+ .period = 0x04,
+ .cntr = 0x00,
+ .ctrl = 0x0c,
+ },
+ .prescaler = 1,
+ .set_enable = rockchip_pwm_set_enable_v2,
+};
+
+static const struct rockchip_pwm_data pwm_data_vop = {
+ .regs = {
+ .duty = 0x08,
+ .period = 0x04,
+ .cntr = 0x0c,
+ .ctrl = 0x00,
+ },
+ .prescaler = 1,
+ .set_enable = rockchip_pwm_set_enable_v2,
+};
+
+static const struct of_device_id rockchip_pwm_dt_ids[] = {
+ { .compatible = "rockchip,rk2928-pwm", .data = &pwm_data_v1},
+ { .compatible = "rockchip,rk3288-pwm", .data = &pwm_data_v2},
+ { .compatible = "rockchip,vop-pwm", .data = &pwm_data_vop},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rockchip_pwm_dt_ids);
+
+static int rockchip_pwm_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct rockchip_pwm_chip *pc;
+ struct resource *r;
+ int ret;
+
+ id = of_match_device(rockchip_pwm_dt_ids, &pdev->dev);
+ if (!id)
+ return -EINVAL;
+
+ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pc->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(pc->base))
+ return PTR_ERR(pc->base);
+
+ pc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(pc->clk))
+ return PTR_ERR(pc->clk);
+
+ ret = clk_prepare(pc->clk);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pc);
+
+ pc->data = id->data;
+ pc->chip.dev = &pdev->dev;
+ pc->chip.ops = &rockchip_pwm_ops;
+ pc->chip.base = -1;
+ pc->chip.npwm = 1;
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+ clk_unprepare(pc->clk);
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static int rockchip_pwm_remove(struct platform_device *pdev)
+{
+ struct rockchip_pwm_chip *pc = platform_get_drvdata(pdev);
+
+ clk_unprepare(pc->clk);
+
+ return pwmchip_remove(&pc->chip);
+}
+
+static struct platform_driver rockchip_pwm_driver = {
+ .driver = {
+ .name = "rockchip-pwm",
+ .of_match_table = rockchip_pwm_dt_ids,
+ },
+ .probe = rockchip_pwm_probe,
+ .remove = rockchip_pwm_remove,
+};
+module_platform_driver(rockchip_pwm_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Rockchip SoC PWM driver");
+MODULE_LICENSE("GPL v2");
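rockchip_pwm_config() converts the requested period and duty cycle from nanoseconds into counter ticks as clk_rate * ns / (prescaler * NSEC_PER_SEC). A worked user-space check with an assumed 24 MHz PWM clock and the v2 prescaler of 1 (illustrative numbers only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed values: 24 MHz PWM clock, prescaler 1 (the v2/vop case). */
	uint64_t clk_rate = 24000000, prescaler = 1;
	uint64_t period_ns = 1000000, duty_ns = 250000;	/* 1 kHz, 25% duty */

	uint64_t period = clk_rate * period_ns / (prescaler * 1000000000ULL);
	uint64_t duty = clk_rate * duty_ns / (prescaler * 1000000000ULL);

	/* Expect 24000 period ticks and 6000 duty ticks. */
	printf("period=%llu ticks, duty=%llu ticks\n",
	       (unsigned long long)period, (unsigned long long)duty);
	return 0;
}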
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
new file mode 100644
index 000000000000..b95115cdaea7
--- /dev/null
+++ b/drivers/pwm/pwm-sti.c
@@ -0,0 +1,418 @@
+/*
+ * PWM device driver for ST SoCs.
+ * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ *
+ * Copyright (C) 2013-2014 STMicroelectronics (R&D) Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+
+#define STI_DS_REG(ch) (4 * (ch)) /* Channel's Duty Cycle register */
+#define STI_PWMCR 0x50 /* Control/Config register */
+#define STI_INTEN 0x54 /* Interrupt Enable/Disable register */
+#define PWM_PRESCALE_LOW_MASK 0x0f
+#define PWM_PRESCALE_HIGH_MASK 0xf0
+
+/* Regfield IDs */
+enum {
+ PWMCLK_PRESCALE_LOW,
+ PWMCLK_PRESCALE_HIGH,
+ PWM_EN,
+ PWM_INT_EN,
+
+ /* Keep last */
+ MAX_REGFIELDS
+};
+
+struct sti_pwm_compat_data {
+ const struct reg_field *reg_fields;
+ unsigned int num_chan;
+ unsigned int max_pwm_cnt;
+ unsigned int max_prescale;
+};
+
+struct sti_pwm_chip {
+ struct device *dev;
+ struct clk *clk;
+ unsigned long clk_rate;
+ struct regmap *regmap;
+ struct sti_pwm_compat_data *cdata;
+ struct regmap_field *prescale_low;
+ struct regmap_field *prescale_high;
+ struct regmap_field *pwm_en;
+ struct regmap_field *pwm_int_en;
+ struct pwm_chip chip;
+ struct pwm_device *cur;
+ unsigned int en_count;
+ struct mutex sti_pwm_lock; /* To sync between enable/disable calls */
+ void __iomem *mmio;
+};
+
+static const struct reg_field sti_pwm_regfields[MAX_REGFIELDS] = {
+ [PWMCLK_PRESCALE_LOW] = REG_FIELD(STI_PWMCR, 0, 3),
+ [PWMCLK_PRESCALE_HIGH] = REG_FIELD(STI_PWMCR, 11, 14),
+ [PWM_EN] = REG_FIELD(STI_PWMCR, 9, 9),
+ [PWM_INT_EN] = REG_FIELD(STI_INTEN, 0, 0),
+};
+
+static inline struct sti_pwm_chip *to_sti_pwmchip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct sti_pwm_chip, chip);
+}
+
+/*
+ * Calculate the prescaler value corresponding to the period.
+ */
+static int sti_pwm_get_prescale(struct sti_pwm_chip *pc, unsigned long period,
+ unsigned int *prescale)
+{
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+ unsigned long val;
+ unsigned int ps;
+
+ /*
+ * prescale = (period_ns * clk_rate) / (10^9 * (max_pwm_count + 1)) - 1
+ */
+ val = NSEC_PER_SEC / pc->clk_rate;
+ val *= cdata->max_pwm_cnt + 1;
+
+ if (period % val) {
+ return -EINVAL;
+ } else {
+ ps = period / val - 1;
+ if (ps > cdata->max_prescale)
+ return -EINVAL;
+ }
+ *prescale = ps;
+
+ return 0;
+}
+
+/* Calculate the number of PWM devices configured with a period. */
+static unsigned int sti_pwm_count_configured(struct pwm_chip *chip)
+{
+ struct pwm_device *pwm;
+ unsigned int ncfg = 0;
+ unsigned int i;
+
+ for (i = 0; i < chip->npwm; i++) {
+ pwm = &chip->pwms[i];
+ if (test_bit(PWMF_REQUESTED, &pwm->flags)) {
+ if (pwm_get_period(pwm))
+ ncfg++;
+ }
+ }
+
+ return ncfg;
+}
+
+/*
+ * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles.
+ * The only way to change the period (apart from changing the PWM input clock)
+ * is to change the PWM clock prescaler.
+ * The prescaler is 8 bits wide, so 256 prescaler values and hence
+ * 256 possible period values are supported (for a particular clock rate).
+ * The requested period will be applied only if it matches one of these
+ * 256 values.
+ */
+static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ int duty_ns, int period_ns)
+{
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+ struct pwm_device *cur = pc->cur;
+ struct device *dev = pc->dev;
+ unsigned int prescale = 0, pwmvalx;
+ int ret;
+ unsigned int ncfg;
+ bool period_same = false;
+
+ ncfg = sti_pwm_count_configured(chip);
+ if (ncfg)
+ period_same = (period_ns == pwm_get_period(cur));
+
+ /* Allow configuration changes if one of the
+ * following conditions is satisfied:
+ * 1. No channels have been configured.
+ * 2. Only one channel has been configured and the new request
+ * is for the same channel.
+ * 3. Only one channel has been configured, the new request is
+ * for a new channel, and the period of the new channel is the
+ * same as the currently configured period.
+ * 4. More than one channel is configured and the period of the
+ * new request is the same as the current period.
+ */
+ if (!ncfg ||
+ ((ncfg == 1) && (pwm->hwpwm == cur->hwpwm)) ||
+ ((ncfg == 1) && (pwm->hwpwm != cur->hwpwm) && period_same) ||
+ ((ncfg > 1) && period_same)) {
+ /* Enable clock before writing to PWM registers. */
+ ret = clk_enable(pc->clk);
+ if (ret)
+ return ret;
+
+ if (!period_same) {
+ ret = sti_pwm_get_prescale(pc, period_ns, &prescale);
+ if (ret)
+ goto clk_dis;
+
+ ret =
+ regmap_field_write(pc->prescale_low,
+ prescale & PWM_PRESCALE_LOW_MASK);
+ if (ret)
+ goto clk_dis;
+
+ ret =
+ regmap_field_write(pc->prescale_high,
+ (prescale & PWM_PRESCALE_HIGH_MASK) >> 4);
+ if (ret)
+ goto clk_dis;
+ }
+
+ /*
+ * When PWMVal == 0, PWM pulse = 1 local clock cycle.
+ * When PWMVal == max_pwm_count,
+ * PWM pulse = (max_pwm_count + 1) local cycles,
+ * that is continuous pulse: signal never goes low.
+ */
+ pwmvalx = cdata->max_pwm_cnt * duty_ns / period_ns;
+
+ ret = regmap_write(pc->regmap, STI_DS_REG(pwm->hwpwm), pwmvalx);
+ if (ret)
+ goto clk_dis;
+
+ ret = regmap_field_write(pc->pwm_int_en, 0);
+
+ pc->cur = pwm;
+
+ dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n",
+ prescale, period_ns, duty_ns, pwmvalx);
+ } else {
+ return -EINVAL;
+ }
+
+clk_dis:
+ clk_disable(pc->clk);
+ return ret;
+}
+
+static int sti_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ struct device *dev = pc->dev;
+ int ret = 0;
+
+ /*
+ * Since we have a common enable for all PWM channels,
+ * do not enable if already enabled.
+ */
+ mutex_lock(&pc->sti_pwm_lock);
+ if (!pc->en_count) {
+ ret = clk_enable(pc->clk);
+ if (ret)
+ goto out;
+
+ ret = regmap_field_write(pc->pwm_en, 1);
+ if (ret) {
+ dev_err(dev, "failed to enable PWM device:%d\n",
+ pwm->hwpwm);
+ goto out;
+ }
+ }
+ pc->en_count++;
+out:
+ mutex_unlock(&pc->sti_pwm_lock);
+ return ret;
+}
+
+static void sti_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+
+ mutex_lock(&pc->sti_pwm_lock);
+ if (--pc->en_count) {
+ mutex_unlock(&pc->sti_pwm_lock);
+ return;
+ }
+ regmap_field_write(pc->pwm_en, 0);
+
+ clk_disable(pc->clk);
+ mutex_unlock(&pc->sti_pwm_lock);
+}
+
+static const struct pwm_ops sti_pwm_ops = {
+ .config = sti_pwm_config,
+ .enable = sti_pwm_enable,
+ .disable = sti_pwm_disable,
+ .owner = THIS_MODULE,
+};
+
+static int sti_pwm_probe_dt(struct sti_pwm_chip *pc)
+{
+ struct device *dev = pc->dev;
+ const struct reg_field *reg_fields;
+ struct device_node *np = dev->of_node;
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+ u32 num_chan;
+
+ of_property_read_u32(np, "st,pwm-num-chan", &num_chan);
+ if (num_chan)
+ cdata->num_chan = num_chan;
+
+ reg_fields = cdata->reg_fields;
+
+ pc->prescale_low = devm_regmap_field_alloc(dev, pc->regmap,
+ reg_fields[PWMCLK_PRESCALE_LOW]);
+ if (IS_ERR(pc->prescale_low))
+ return PTR_ERR(pc->prescale_low);
+
+ pc->prescale_high = devm_regmap_field_alloc(dev, pc->regmap,
+ reg_fields[PWMCLK_PRESCALE_HIGH]);
+ if (IS_ERR(pc->prescale_high))
+ return PTR_ERR(pc->prescale_high);
+
+ pc->pwm_en = devm_regmap_field_alloc(dev, pc->regmap,
+ reg_fields[PWM_EN]);
+ if (IS_ERR(pc->pwm_en))
+ return PTR_ERR(pc->pwm_en);
+
+ pc->pwm_int_en = devm_regmap_field_alloc(dev, pc->regmap,
+ reg_fields[PWM_INT_EN]);
+ if (IS_ERR(pc->pwm_int_en))
+ return PTR_ERR(pc->pwm_int_en);
+
+ return 0;
+}
+
+static const struct regmap_config sti_pwm_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int sti_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sti_pwm_compat_data *cdata;
+ struct sti_pwm_chip *pc;
+ struct resource *res;
+ int ret;
+
+ pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ cdata = devm_kzalloc(dev, sizeof(*cdata), GFP_KERNEL);
+ if (!cdata)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ pc->mmio = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pc->mmio))
+ return PTR_ERR(pc->mmio);
+
+ pc->regmap = devm_regmap_init_mmio(dev, pc->mmio,
+ &sti_pwm_regmap_config);
+ if (IS_ERR(pc->regmap))
+ return PTR_ERR(pc->regmap);
+
+ /*
+ * Set up PWM data with default values; some values may be replaced
+ * with specific ones provided by the Device Tree.
+ */
+ cdata->reg_fields = &sti_pwm_regfields[0];
+ cdata->max_prescale = 0xff;
+ cdata->max_pwm_cnt = 255;
+ cdata->num_chan = 1;
+
+ pc->cdata = cdata;
+ pc->dev = dev;
+ pc->en_count = 0;
+ mutex_init(&pc->sti_pwm_lock);
+
+ ret = sti_pwm_probe_dt(pc);
+ if (ret)
+ return ret;
+
+ pc->clk = of_clk_get_by_name(dev->of_node, "pwm");
+ if (IS_ERR(pc->clk)) {
+ dev_err(dev, "failed to get PWM clock\n");
+ return PTR_ERR(pc->clk);
+ }
+
+ pc->clk_rate = clk_get_rate(pc->clk);
+ if (!pc->clk_rate) {
+ dev_err(dev, "failed to get clock rate\n");
+ return -EINVAL;
+ }
+
+ ret = clk_prepare(pc->clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
+ }
+
+ pc->chip.dev = dev;
+ pc->chip.ops = &sti_pwm_ops;
+ pc->chip.base = -1;
+ pc->chip.npwm = pc->cdata->num_chan;
+ pc->chip.can_sleep = true;
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+ clk_unprepare(pc->clk);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, pc);
+
+ return 0;
+}
+
+static int sti_pwm_remove(struct platform_device *pdev)
+{
+ struct sti_pwm_chip *pc = platform_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < pc->cdata->num_chan; i++)
+ pwm_disable(&pc->chip.pwms[i]);
+
+ clk_unprepare(pc->clk);
+
+ return pwmchip_remove(&pc->chip);
+}
+
+static const struct of_device_id sti_pwm_of_match[] = {
+ { .compatible = "st,sti-pwm", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sti_pwm_of_match);
+
+static struct platform_driver sti_pwm_driver = {
+ .driver = {
+ .name = "sti-pwm",
+ .of_match_table = sti_pwm_of_match,
+ },
+ .probe = sti_pwm_probe,
+ .remove = sti_pwm_remove,
+};
+module_platform_driver(sti_pwm_driver);
+
+MODULE_AUTHOR("Ajit Pal Singh <ajitpal.singh@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics ST PWM driver");
+MODULE_LICENSE("GPL");
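
The probe path above fixes the channel geometry (an 8-bit prescaler split across the PWMCLK_PRESCALE_LOW/HIGH fields, a 256-step counter via max_pwm_cnt = 255) and records the PWM input clock rate, so a requested period ultimately resolves to a prescaler value. The following is an editorial sketch of that mapping, assuming the defaults set in sti_pwm_probe(); the helper name and rounding policy are hypothetical and not part of the patch.

/*
 * Editorial sketch, not part of the patch: one way a requested period
 * could be mapped onto the 8-bit STI prescaler, given the clk_rate,
 * max_pwm_cnt and max_prescale defaults set in sti_pwm_probe() above.
 * The helper name and rounding policy are hypothetical.
 */
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/types.h>

static int sti_pwm_calc_prescale(unsigned long clk_rate,
				 unsigned int max_pwm_cnt,
				 unsigned int max_prescale,
				 unsigned int period_ns,
				 unsigned int *prescale)
{
	u64 ticks;

	/* PWM input clock ticks needed for one full period */
	ticks = (u64)clk_rate * period_ns;
	ticks = div_u64(ticks, 1000000000UL);		/* ns -> ticks */

	/* the counter always runs max_pwm_cnt + 1 steps per period */
	ticks = div_u64(ticks, max_pwm_cnt + 1);
	if (!ticks || ticks - 1 > max_prescale)
		return -ERANGE;				/* period unreachable */

	*prescale = ticks - 1;	/* register encodes "divide by N + 1" */
	return 0;
}
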
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
index 3b119bc2c3c6..67481dc6da3f 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -62,10 +62,8 @@ static int pwmss_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
- if (!info) {
- dev_err(&pdev->dev, "failed to allocate memory\n");
+ if (!info)
return -ENOMEM;
- }
mutex_init(&info->pwmss_lock);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 0305675270ee..a7b42680a06a 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -644,27 +644,26 @@ enum tsi721_smsg_int_flag {
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
-#define TSI721_BDMA_BD_RING_SZ 128
#define TSI721_BDMA_MAX_BCOUNT (TSI721_DMAD_BCOUNT1 + 1)
struct tsi721_tx_desc {
struct dma_async_tx_descriptor txd;
- struct tsi721_dma_desc *hw_desc;
u16 destid;
/* low 64-bits of 66-bit RIO address */
u64 rio_addr;
/* upper 2-bits of 66-bit RIO address */
u8 rio_addr_u;
- u32 bcount;
- bool interrupt;
+ enum dma_rtype rtype;
struct list_head desc_node;
- struct list_head tx_list;
+ struct scatterlist *sg;
+ unsigned int sg_len;
+ enum dma_status status;
};
struct tsi721_bdma_chan {
int id;
void __iomem *regs;
- int bd_num; /* number of buffer descriptors */
+ int bd_num; /* number of HW buffer descriptors */
void *bd_base; /* start of DMA descriptors */
dma_addr_t bd_phys;
void *sts_base; /* start of DMA BD status FIFO */
@@ -680,7 +679,6 @@ struct tsi721_bdma_chan {
struct list_head active_list;
struct list_head queue;
struct list_head free_list;
- dma_cookie_t completed_cookie;
struct tasklet_struct tasklet;
bool active;
};
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 44341dc5b148..f64c5decb747 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -1,7 +1,7 @@
/*
* DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
*
- * Copyright 2011 Integrated Device Technology, Inc.
+ * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
* Alexandre Bounine <alexandre.bounine@idt.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -14,9 +14,8 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
*/
#include <linux/io.h>
@@ -32,9 +31,22 @@
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
+#include "../../dma/dmaengine.h"
#include "tsi721.h"
+#define TSI721_DMA_TX_QUEUE_SZ 16 /* number of transaction descriptors */
+
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
+#endif
+static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
+
+static unsigned int dma_desc_per_channel = 128;
+module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(dma_desc_per_channel,
+ "Number of DMA descriptors per channel (default: 128)");
+
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
return container_of(chan, struct tsi721_bdma_chan, dchan);
@@ -59,7 +71,7 @@ struct tsi721_tx_desc *tsi721_dma_first_active(
struct tsi721_tx_desc, desc_node);
}
-static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
struct tsi721_dma_desc *bd_ptr;
struct device *dev = bdma_chan->dchan.device->dev;
@@ -67,17 +79,23 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
dma_addr_t bd_phys;
dma_addr_t sts_phys;
int sts_size;
- int bd_num = bdma_chan->bd_num;
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
+#endif
dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
- /* Allocate space for DMA descriptors */
+ /*
+ * Allocate space for DMA descriptors
+ * (add an extra element for link descriptor)
+ */
bd_ptr = dma_zalloc_coherent(dev,
- bd_num * sizeof(struct tsi721_dma_desc),
+ (bd_num + 1) * sizeof(struct tsi721_dma_desc),
&bd_phys, GFP_KERNEL);
if (!bd_ptr)
return -ENOMEM;
+ bdma_chan->bd_num = bd_num;
bdma_chan->bd_phys = bd_phys;
bdma_chan->bd_base = bd_ptr;
@@ -85,8 +103,8 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
bd_ptr, (unsigned long long)bd_phys);
/* Allocate space for descriptor status FIFO */
- sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
- bd_num : TSI721_DMA_MINSTSSZ;
+ sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
+ (bd_num + 1) : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
sts_ptr = dma_zalloc_coherent(dev,
sts_size * sizeof(struct tsi721_dma_sts),
@@ -94,7 +112,7 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
if (!sts_ptr) {
/* Free space allocated for DMA descriptors */
dma_free_coherent(dev,
- bd_num * sizeof(struct tsi721_dma_desc),
+ (bd_num + 1) * sizeof(struct tsi721_dma_desc),
bd_ptr, bd_phys);
bdma_chan->bd_base = NULL;
return -ENOMEM;
@@ -108,11 +126,11 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
sts_ptr, (unsigned long long)sts_phys, sts_size);
- /* Initialize DMA descriptors ring */
- bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
- bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+ /* Initialize DMA descriptors ring using added link descriptor */
+ bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
+ bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
TSI721_DMAC_DPTRL_MASK);
- bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+ bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);
/* Setup DMA descriptor pointers */
iowrite32(((u64)bd_phys >> 32),
@@ -134,6 +152,55 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+#ifdef CONFIG_PCI_MSI
+ /* Request interrupt service if we are in MSI-X mode */
+ if (priv->flags & TSI721_USING_MSIX) {
+ int rc, idx;
+
+ idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;
+
+ rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
+ priv->msix[idx].irq_name, (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n",
+ bdma_chan->id);
+ goto err_out;
+ }
+
+ idx = TSI721_VECT_DMA0_INT + bdma_chan->id;
+
+ rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
+ priv->msix[idx].irq_name, (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n",
+ bdma_chan->id);
+ free_irq(
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector,
+ (void *)bdma_chan);
+ }
+
+err_out:
+ if (rc) {
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(dev,
+ (bd_num + 1) * sizeof(struct tsi721_dma_desc),
+ bd_ptr, bd_phys);
+ bdma_chan->bd_base = NULL;
+
+ /* Free space allocated for status descriptors */
+ dma_free_coherent(dev,
+ sts_size * sizeof(struct tsi721_dma_sts),
+ sts_ptr, sts_phys);
+ bdma_chan->sts_base = NULL;
+
+ return -EIO;
+ }
+ }
+#endif /* CONFIG_PCI_MSI */
+
/* Toggle DMA channel initialization */
iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
@@ -147,6 +214,9 @@ static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
u32 ch_stat;
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
+#endif
if (bdma_chan->bd_base == NULL)
return 0;
@@ -159,9 +229,18 @@ static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
/* Put DMA channel into init state */
iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ }
+#endif /* CONFIG_PCI_MSI */
+
/* Free space allocated for DMA descriptors */
dma_free_coherent(bdma_chan->dchan.device->dev,
- bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
+ (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
bdma_chan->bd_base, bdma_chan->bd_phys);
bdma_chan->bd_base = NULL;
@@ -243,8 +322,8 @@ static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
}
dev_dbg(bdma_chan->dchan.device->dev,
- "tx_chan: %p, chan: %d, regs: %p\n",
- bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+ "%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id,
+ bdma_chan->wr_count_next);
iowrite32(bdma_chan->wr_count_next,
bdma_chan->regs + TSI721_DMAC_DWRCNT);
@@ -253,72 +332,19 @@ static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
bdma_chan->wr_count = bdma_chan->wr_count_next;
}
-static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
- struct tsi721_tx_desc *desc)
-{
- dev_dbg(bdma_chan->dchan.device->dev,
- "Put desc: %p into free list\n", desc);
-
- if (desc) {
- spin_lock_bh(&bdma_chan->lock);
- list_splice_init(&desc->tx_list, &bdma_chan->free_list);
- list_add(&desc->desc_node, &bdma_chan->free_list);
- bdma_chan->wr_count_next = bdma_chan->wr_count;
- spin_unlock_bh(&bdma_chan->lock);
- }
-}
-
-static
-struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
-{
- struct tsi721_tx_desc *tx_desc, *_tx_desc;
- struct tsi721_tx_desc *ret = NULL;
- int i;
-
- spin_lock_bh(&bdma_chan->lock);
- list_for_each_entry_safe(tx_desc, _tx_desc,
- &bdma_chan->free_list, desc_node) {
- if (async_tx_test_ack(&tx_desc->txd)) {
- list_del(&tx_desc->desc_node);
- ret = tx_desc;
- break;
- }
- dev_dbg(bdma_chan->dchan.device->dev,
- "desc %p not ACKed\n", tx_desc);
- }
-
- if (ret == NULL) {
- dev_dbg(bdma_chan->dchan.device->dev,
- "%s: unable to obtain tx descriptor\n", __func__);
- goto err_out;
- }
-
- i = bdma_chan->wr_count_next % bdma_chan->bd_num;
- if (i == bdma_chan->bd_num - 1) {
- i = 0;
- bdma_chan->wr_count_next++; /* skip link descriptor */
- }
-
- bdma_chan->wr_count_next++;
- tx_desc->txd.phys = bdma_chan->bd_phys +
- i * sizeof(struct tsi721_dma_desc);
- tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-err_out:
- spin_unlock_bh(&bdma_chan->lock);
-
- return ret;
-}
-
static int
-tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg,
- enum dma_rtype rtype, u32 sys_size)
+tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
+ struct tsi721_dma_desc *bd_ptr,
+ struct scatterlist *sg, u32 sys_size)
{
- struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
u64 rio_addr;
+ if (bd_ptr == NULL)
+ return -EINVAL;
+
/* Initialize DMA descriptor */
bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
- (rtype << 19) | desc->destid);
+ (desc->rtype << 19) | desc->destid);
bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
(sys_size << 26));
rio_addr = (desc->rio_addr >> 2) |
@@ -335,51 +361,32 @@ tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg,
}
static int
-tsi721_desc_fill_end(struct tsi721_tx_desc *desc)
+tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
- struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+ if (bd_ptr == NULL)
+ return -EINVAL;
/* Update DMA descriptor */
- if (desc->interrupt)
+ if (interrupt)
bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
- bd_ptr->bcount |= cpu_to_le32(desc->bcount & TSI721_DMAD_BCOUNT1);
+ bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);
return 0;
}
-
-static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
- struct tsi721_tx_desc *desc)
+static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param;
- list_splice_init(&desc->tx_list, &bdma_chan->free_list);
list_move(&desc->desc_node, &bdma_chan->free_list);
- bdma_chan->completed_cookie = txd->cookie;
if (callback)
callback(param);
}
-static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
-{
- struct tsi721_tx_desc *desc, *_d;
- LIST_HEAD(list);
-
- BUG_ON(!tsi721_dma_is_idle(bdma_chan));
-
- if (!list_empty(&bdma_chan->queue))
- tsi721_start_dma(bdma_chan);
-
- list_splice_init(&bdma_chan->active_list, &list);
- list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
-
- list_for_each_entry_safe(desc, _d, &list, desc_node)
- tsi721_dma_chain_complete(bdma_chan, desc);
-}
-
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
u32 srd_ptr;
@@ -403,20 +410,159 @@ static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
bdma_chan->sts_rdptr = srd_ptr;
}
+/* Must be called with the channel spinlock held */
+static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
+{
+ struct dma_chan *dchan = desc->txd.chan;
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ u32 sys_size;
+ u64 rio_addr;
+ dma_addr_t next_addr;
+ u32 bcount;
+ struct scatterlist *sg;
+ unsigned int i;
+ int err = 0;
+ struct tsi721_dma_desc *bd_ptr = NULL;
+ u32 idx, rd_idx;
+ u32 add_count = 0;
+
+ if (!tsi721_dma_is_idle(bdma_chan)) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "BUG: Attempt to use non-idle channel\n");
+ return -EIO;
+ }
+
+ /*
+ * Fill DMA channel's hardware buffer descriptors.
+ * (NOTE: RapidIO destination address is limited to 64 bits for now)
+ */
+ rio_addr = desc->rio_addr;
+ next_addr = -1;
+ bcount = 0;
+ sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size;
+
+ rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
+ rd_idx %= (bdma_chan->bd_num + 1);
+
+ idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
+ if (idx == bdma_chan->bd_num) {
+ /* wrap around link descriptor */
+ idx = 0;
+ add_count++;
+ }
+
+ dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n",
+ __func__, rd_idx, idx);
+
+ for_each_sg(desc->sg, sg, desc->sg_len, i) {
+
+ dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n",
+ i, desc->sg_len,
+ (unsigned long long)sg_dma_address(sg), sg_dma_len(sg));
+
+ if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
+ dev_err(dchan->device->dev,
+ "%s: SG entry %d is too large\n", __func__, i);
+ err = -EINVAL;
+ break;
+ }
+
+ /*
+ * If this sg entry forms contiguous block with previous one,
+ * try to merge it into existing DMA descriptor
+ */
+ if (next_addr == sg_dma_address(sg) &&
+ bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
+ /* Adjust byte count of the descriptor */
+ bcount += sg_dma_len(sg);
+ goto entry_done;
+ } else if (next_addr != -1) {
+ /* Finalize descriptor using total byte count value */
+ tsi721_desc_fill_end(bd_ptr, bcount, 0);
+ dev_dbg(dchan->device->dev,
+ "%s: prev desc final len: %d\n",
+ __func__, bcount);
+ }
+
+ desc->rio_addr = rio_addr;
+
+ if (i && idx == rd_idx) {
+ dev_dbg(dchan->device->dev,
+ "%s: HW descriptor ring is full @ %d\n",
+ __func__, i);
+ desc->sg = sg;
+ desc->sg_len -= i;
+ break;
+ }
+
+ bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
+ err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
+ if (err) {
+ dev_err(dchan->device->dev,
+ "Failed to build desc: err=%d\n", err);
+ break;
+ }
+
+ dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n",
+ bd_ptr, desc->destid, desc->rio_addr);
+
+ next_addr = sg_dma_address(sg);
+ bcount = sg_dma_len(sg);
+
+ add_count++;
+ if (++idx == bdma_chan->bd_num) {
+ /* wrap around link descriptor */
+ idx = 0;
+ add_count++;
+ }
+
+entry_done:
+ if (sg_is_last(sg)) {
+ tsi721_desc_fill_end(bd_ptr, bcount, 0);
+ dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n",
+ __func__, bcount);
+ desc->sg_len = 0;
+ } else {
+ rio_addr += sg_dma_len(sg);
+ next_addr += sg_dma_len(sg);
+ }
+ }
+
+ if (!err)
+ bdma_chan->wr_count_next += add_count;
+
+ return err;
+}
+
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
- if (list_empty(&bdma_chan->active_list) ||
- list_is_singular(&bdma_chan->active_list)) {
- dev_dbg(bdma_chan->dchan.device->dev,
- "%s: Active_list empty\n", __func__);
- tsi721_dma_complete_all(bdma_chan);
- } else {
- dev_dbg(bdma_chan->dchan.device->dev,
- "%s: Active_list NOT empty\n", __func__);
- tsi721_dma_chain_complete(bdma_chan,
- tsi721_dma_first_active(bdma_chan));
- tsi721_start_dma(bdma_chan);
+ struct tsi721_tx_desc *desc;
+ int err;
+
+ dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);
+
+ /*
+ * If there are any new transactions in the queue add them
+ * into the processing list
+ */
+ if (!list_empty(&bdma_chan->queue))
+ list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+
+ /* Start new transaction (if available) */
+ if (!list_empty(&bdma_chan->active_list)) {
+ desc = tsi721_dma_first_active(bdma_chan);
+ err = tsi721_submit_sg(desc);
+ if (!err)
+ tsi721_start_dma(bdma_chan);
+ else {
+ tsi721_dma_tx_err(bdma_chan, desc);
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "ERR: tsi721_submit_sg failed with err=%d\n",
+ err);
+ }
}
+
+ dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__);
}
static void tsi721_dma_tasklet(unsigned long data)
@@ -444,8 +590,29 @@ static void tsi721_dma_tasklet(unsigned long data)
}
if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+ struct tsi721_tx_desc *desc;
+
tsi721_clr_stat(bdma_chan);
spin_lock(&bdma_chan->lock);
+ desc = tsi721_dma_first_active(bdma_chan);
+
+ if (desc->sg_len == 0) {
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
+
+ desc->status = DMA_COMPLETE;
+ dma_cookie_complete(&desc->txd);
+ if (desc->txd.flags & DMA_PREP_INTERRUPT) {
+ callback = desc->txd.callback;
+ param = desc->txd.callback_param;
+ }
+ list_move(&desc->desc_node, &bdma_chan->free_list);
+ spin_unlock(&bdma_chan->lock);
+ if (callback)
+ callback(param);
+ spin_lock(&bdma_chan->lock);
+ }
+
tsi721_advance_work(bdma_chan);
spin_unlock(&bdma_chan->lock);
}
@@ -460,21 +627,24 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
dma_cookie_t cookie;
- spin_lock_bh(&bdma_chan->lock);
+ /* Check if the descriptor is detached from any lists */
+ if (!list_empty(&desc->desc_node)) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "%s: wrong state of descriptor %p\n", __func__, txd);
+ return -EIO;
+ }
- cookie = txd->chan->cookie;
- if (++cookie < 0)
- cookie = 1;
- txd->chan->cookie = cookie;
- txd->cookie = cookie;
+ spin_lock_bh(&bdma_chan->lock);
- if (list_empty(&bdma_chan->active_list)) {
- list_add_tail(&desc->desc_node, &bdma_chan->active_list);
- tsi721_start_dma(bdma_chan);
- } else {
- list_add_tail(&desc->desc_node, &bdma_chan->queue);
+ if (!bdma_chan->active) {
+ spin_unlock_bh(&bdma_chan->lock);
+ return -ENODEV;
}
+ cookie = dma_cookie_assign(txd);
+ desc->status = DMA_IN_PROGRESS;
+ list_add_tail(&desc->desc_node, &bdma_chan->queue);
+
spin_unlock_bh(&bdma_chan->lock);
return cookie;
}
@@ -482,115 +652,52 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-#ifdef CONFIG_PCI_MSI
- struct tsi721_device *priv = to_tsi721(dchan->device);
-#endif
struct tsi721_tx_desc *desc = NULL;
- LIST_HEAD(tmp_list);
int i;
- int rc;
+
+ dev_dbg(dchan->device->dev, "%s: for channel %d\n",
+ __func__, bdma_chan->id);
if (bdma_chan->bd_base)
- return bdma_chan->bd_num - 1;
+ return TSI721_DMA_TX_QUEUE_SZ;
/* Initialize BDMA channel */
- if (tsi721_bdma_ch_init(bdma_chan)) {
+ if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
dev_err(dchan->device->dev, "Unable to initialize data DMA"
" channel %d, aborting\n", bdma_chan->id);
- return -ENOMEM;
+ return -ENODEV;
}
- /* Alocate matching number of logical descriptors */
- desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
+ /* Allocate queue of transaction descriptors */
+ desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
GFP_KERNEL);
if (!desc) {
dev_err(dchan->device->dev,
"Failed to allocate logical descriptors\n");
- rc = -ENOMEM;
- goto err_out;
+ tsi721_bdma_ch_free(bdma_chan);
+ return -ENOMEM;
}
bdma_chan->tx_desc = desc;
- for (i = 0; i < bdma_chan->bd_num - 1; i++) {
+ for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
dma_async_tx_descriptor_init(&desc[i].txd, dchan);
desc[i].txd.tx_submit = tsi721_tx_submit;
desc[i].txd.flags = DMA_CTRL_ACK;
- INIT_LIST_HEAD(&desc[i].tx_list);
- list_add_tail(&desc[i].desc_node, &tmp_list);
+ list_add(&desc[i].desc_node, &bdma_chan->free_list);
}
- spin_lock_bh(&bdma_chan->lock);
- list_splice(&tmp_list, &bdma_chan->free_list);
- bdma_chan->completed_cookie = dchan->cookie = 1;
- spin_unlock_bh(&bdma_chan->lock);
-
-#ifdef CONFIG_PCI_MSI
- if (priv->flags & TSI721_USING_MSIX) {
- /* Request interrupt service if we are in MSI-X mode */
- rc = request_irq(
- priv->msix[TSI721_VECT_DMA0_DONE +
- bdma_chan->id].vector,
- tsi721_bdma_msix, 0,
- priv->msix[TSI721_VECT_DMA0_DONE +
- bdma_chan->id].irq_name,
- (void *)bdma_chan);
-
- if (rc) {
- dev_dbg(dchan->device->dev,
- "Unable to allocate MSI-X interrupt for "
- "BDMA%d-DONE\n", bdma_chan->id);
- goto err_out;
- }
-
- rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
- bdma_chan->id].vector,
- tsi721_bdma_msix, 0,
- priv->msix[TSI721_VECT_DMA0_INT +
- bdma_chan->id].irq_name,
- (void *)bdma_chan);
-
- if (rc) {
- dev_dbg(dchan->device->dev,
- "Unable to allocate MSI-X interrupt for "
- "BDMA%d-INT\n", bdma_chan->id);
- free_irq(
- priv->msix[TSI721_VECT_DMA0_DONE +
- bdma_chan->id].vector,
- (void *)bdma_chan);
- rc = -EIO;
- goto err_out;
- }
- }
-#endif /* CONFIG_PCI_MSI */
+ dma_cookie_init(dchan);
bdma_chan->active = true;
tsi721_bdma_interrupt_enable(bdma_chan, 1);
- return bdma_chan->bd_num - 1;
-
-err_out:
- kfree(desc);
- tsi721_bdma_ch_free(bdma_chan);
- return rc;
+ return TSI721_DMA_TX_QUEUE_SZ;
}
-static void tsi721_free_chan_resources(struct dma_chan *dchan)
+static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
- struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
- struct tsi721_device *priv = to_tsi721(dchan->device);
- LIST_HEAD(list);
-
- dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
-
- if (bdma_chan->bd_base == NULL)
- return;
-
- BUG_ON(!list_empty(&bdma_chan->active_list));
- BUG_ON(!list_empty(&bdma_chan->queue));
-
- tsi721_bdma_interrupt_enable(bdma_chan, 0);
- bdma_chan->active = false;
+ struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
@@ -601,64 +708,48 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
} else
#endif
synchronize_irq(priv->pdev->irq);
+}
- tasklet_kill(&bdma_chan->tasklet);
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
- spin_lock_bh(&bdma_chan->lock);
- list_splice_init(&bdma_chan->free_list, &list);
- spin_unlock_bh(&bdma_chan->lock);
+ dev_dbg(dchan->device->dev, "%s: for channel %d\n",
+ __func__, bdma_chan->id);
-#ifdef CONFIG_PCI_MSI
- if (priv->flags & TSI721_USING_MSIX) {
- free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
- bdma_chan->id].vector, (void *)bdma_chan);
- free_irq(priv->msix[TSI721_VECT_DMA0_INT +
- bdma_chan->id].vector, (void *)bdma_chan);
- }
-#endif /* CONFIG_PCI_MSI */
+ if (bdma_chan->bd_base == NULL)
+ return;
- tsi721_bdma_ch_free(bdma_chan);
+ BUG_ON(!list_empty(&bdma_chan->active_list));
+ BUG_ON(!list_empty(&bdma_chan->queue));
+
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+ bdma_chan->active = false;
+ tsi721_sync_dma_irq(bdma_chan);
+ tasklet_kill(&bdma_chan->tasklet);
+ INIT_LIST_HEAD(&bdma_chan->free_list);
kfree(bdma_chan->tx_desc);
+ tsi721_bdma_ch_free(bdma_chan);
}
static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
- struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
- dma_cookie_t last_used;
- dma_cookie_t last_completed;
- int ret;
-
- spin_lock_bh(&bdma_chan->lock);
- last_completed = bdma_chan->completed_cookie;
- last_used = dchan->cookie;
- spin_unlock_bh(&bdma_chan->lock);
-
- ret = dma_async_is_complete(cookie, last_completed, last_used);
-
- dma_set_tx_state(txstate, last_completed, last_used, 0);
-
- dev_dbg(dchan->device->dev,
- "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
- __func__, ret, last_completed, last_used);
-
- return ret;
+ return dma_cookie_status(dchan, cookie, txstate);
}
static void tsi721_issue_pending(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
- dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+ dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);
- if (tsi721_dma_is_idle(bdma_chan)) {
+ if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
spin_lock_bh(&bdma_chan->lock);
tsi721_advance_work(bdma_chan);
spin_unlock_bh(&bdma_chan->lock);
- } else
- dev_dbg(dchan->device->dev,
- "%s: DMA channel still busy\n", __func__);
+ }
}
static
@@ -668,21 +759,19 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
void *tinfo)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
- struct tsi721_tx_desc *desc = NULL;
- struct tsi721_tx_desc *first = NULL;
- struct scatterlist *sg;
+ struct tsi721_tx_desc *desc, *_d;
struct rio_dma_ext *rext = tinfo;
- u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
- unsigned int i;
- u32 sys_size = dma_to_mport(dchan->device)->sys_size;
enum dma_rtype rtype;
- dma_addr_t next_addr = -1;
+ struct dma_async_tx_descriptor *txd = NULL;
if (!sgl || !sg_len) {
dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
return NULL;
}
+ dev_dbg(dchan->device->dev, "%s: %s\n", __func__,
+ (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");
+
if (dir == DMA_DEV_TO_MEM)
rtype = NREAD;
else if (dir == DMA_MEM_TO_DEV) {
@@ -704,97 +793,26 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
return NULL;
}
- for_each_sg(sgl, sg, sg_len, i) {
- int err;
-
- if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
- dev_err(dchan->device->dev,
- "%s: SG entry %d is too large\n", __func__, i);
- goto err_desc_put;
- }
-
- /*
- * If this sg entry forms contiguous block with previous one,
- * try to merge it into existing DMA descriptor
- */
- if (desc) {
- if (next_addr == sg_dma_address(sg) &&
- desc->bcount + sg_dma_len(sg) <=
- TSI721_BDMA_MAX_BCOUNT) {
- /* Adjust byte count of the descriptor */
- desc->bcount += sg_dma_len(sg);
- goto entry_done;
- }
-
- /*
- * Finalize this descriptor using total
- * byte count value.
- */
- tsi721_desc_fill_end(desc);
- dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
- __func__, desc->bcount);
- }
-
- /*
- * Obtain and initialize a new descriptor
- */
- desc = tsi721_desc_get(bdma_chan);
- if (!desc) {
- dev_err(dchan->device->dev,
- "%s: Failed to get new descriptor for SG %d\n",
- __func__, i);
- goto err_desc_put;
- }
-
- desc->destid = rext->destid;
- desc->rio_addr = rio_addr;
- desc->rio_addr_u = 0;
- desc->bcount = sg_dma_len(sg);
-
- dev_dbg(dchan->device->dev,
- "sg%d desc: 0x%llx, addr: 0x%llx len: %d\n",
- i, (u64)desc->txd.phys,
- (unsigned long long)sg_dma_address(sg),
- sg_dma_len(sg));
-
- dev_dbg(dchan->device->dev,
- "bd_ptr = %p did=%d raddr=0x%llx\n",
- desc->hw_desc, desc->destid, desc->rio_addr);
-
- err = tsi721_desc_fill_init(desc, sg, rtype, sys_size);
- if (err) {
- dev_err(dchan->device->dev,
- "Failed to build desc: %d\n", err);
- goto err_desc_put;
- }
-
- next_addr = sg_dma_address(sg);
-
- if (!first)
- first = desc;
- else
- list_add_tail(&desc->desc_node, &first->tx_list);
+ spin_lock_bh(&bdma_chan->lock);
-entry_done:
- if (sg_is_last(sg)) {
- desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
- tsi721_desc_fill_end(desc);
- dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
- __func__, desc->bcount);
- } else {
- rio_addr += sg_dma_len(sg);
- next_addr += sg_dma_len(sg);
+ list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) {
+ if (async_tx_test_ack(&desc->txd)) {
+ list_del_init(&desc->desc_node);
+ desc->destid = rext->destid;
+ desc->rio_addr = rext->rio_addr;
+ desc->rio_addr_u = 0;
+ desc->rtype = rtype;
+ desc->sg_len = sg_len;
+ desc->sg = sgl;
+ txd = &desc->txd;
+ txd->flags = flags;
+ break;
}
}
- first->txd.cookie = -EBUSY;
- desc->txd.flags = flags;
-
- return &first->txd;
+ spin_unlock_bh(&bdma_chan->lock);
-err_desc_put:
- tsi721_desc_put(bdma_chan, first);
- return NULL;
+ return txd;
}
static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
@@ -802,23 +820,34 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
struct tsi721_tx_desc *desc, *_d;
+ u32 dmac_int;
LIST_HEAD(list);
dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ return -ENOSYS;
spin_lock_bh(&bdma_chan->lock);
- /* make sure to stop the transfer */
- iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
+ bdma_chan->active = false;
+
+ if (!tsi721_dma_is_idle(bdma_chan)) {
+ /* make sure to stop the transfer */
+ iowrite32(TSI721_DMAC_CTL_SUSP,
+ bdma_chan->regs + TSI721_DMAC_CTL);
+
+ /* Wait until DMA channel stops */
+ do {
+ dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
+ }
list_splice_init(&bdma_chan->active_list, &list);
list_splice_init(&bdma_chan->queue, &list);
list_for_each_entry_safe(desc, _d, &list, desc_node)
- tsi721_dma_chain_complete(bdma_chan, desc);
+ tsi721_dma_tx_err(bdma_chan, desc);
spin_unlock_bh(&bdma_chan->lock);
@@ -828,22 +857,18 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
int tsi721_register_dma(struct tsi721_device *priv)
{
int i;
- int nr_channels = TSI721_DMA_MAXCH;
+ int nr_channels = 0;
int err;
struct rio_mport *mport = priv->mport;
- mport->dma.dev = &priv->pdev->dev;
- mport->dma.chancnt = nr_channels;
-
INIT_LIST_HEAD(&mport->dma.channels);
- for (i = 0; i < nr_channels; i++) {
+ for (i = 0; i < TSI721_DMA_MAXCH; i++) {
struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
if (i == TSI721_DMACH_MAINT)
continue;
- bdma_chan->bd_num = TSI721_BDMA_BD_RING_SZ;
bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
bdma_chan->dchan.device = &mport->dma;
@@ -862,12 +887,15 @@ int tsi721_register_dma(struct tsi721_device *priv)
(unsigned long)bdma_chan);
list_add_tail(&bdma_chan->dchan.device_node,
&mport->dma.channels);
+ nr_channels++;
}
+ mport->dma.chancnt = nr_channels;
dma_cap_zero(mport->dma.cap_mask);
dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
+ mport->dma.dev = &priv->pdev->dev;
mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
mport->dma.device_tx_status = tsi721_tx_status;
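
The tsi721_dma.c rework above replaces the old one-hardware-descriptor-per-SG-entry scheme with per-transaction submission: tsi721_submit_sg() walks the transaction's scatterlist, merges physically contiguous entries into a single hardware buffer descriptor, and skips the extra DTYPE3 link descriptor that occupies slot bd_num of the ring, advancing wr_count_next by the number of slots consumed. Below is a standalone, simplified sketch of just that bookkeeping, with hypothetical names and the ring-full (rd_idx) handling omitted; it is illustrative only, not driver code.

/*
 * Standalone sketch of the bookkeeping behind tsi721_submit_sg() above:
 * contiguous scatterlist entries are merged into a single hardware buffer
 * descriptor, and the write index skips the extra link descriptor that
 * sits in slot bd_num of the ring. Simplified and hypothetical.
 */
struct sg_ent {
	unsigned long long addr;
	unsigned int len;
};

static unsigned int ring_fill(const struct sg_ent *sg, unsigned int nents,
			      unsigned int bd_num, unsigned int wr_count,
			      unsigned int max_bcount)
{
	unsigned long long next_addr = ~0ULL;	/* "no open descriptor" */
	unsigned int bcount = 0, used = 0, idx, i;

	idx = wr_count % (bd_num + 1);
	if (idx == bd_num) {			/* slot bd_num holds the link BD */
		idx = 0;
		used++;
	}

	for (i = 0; i < nents; i++) {
		if (sg[i].addr == next_addr &&
		    bcount + sg[i].len <= max_bcount) {
			/* extends the previous entry: merge into current BD */
			bcount += sg[i].len;
		} else {
			/* open a new hardware descriptor at slot idx */
			bcount = sg[i].len;
			used++;
			if (++idx == bd_num) {	/* wrap over the link BD */
				idx = 0;
				used++;
			}
		}
		next_addr = sg[i].addr + sg[i].len;
	}

	return wr_count + used;			/* becomes wr_count_next */
}

The real code additionally stops when the write index catches up with the hardware read index and keeps the unconsumed part of the scatterlist in the transaction (desc->sg, desc->sg_len), so the tasklet can resubmit the remainder after the next completion interrupt.
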
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index a54ba0494dd3..d7b87c64b7cd 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1509,30 +1509,39 @@ EXPORT_SYMBOL_GPL(rio_route_clr_table);
static bool rio_chan_filter(struct dma_chan *chan, void *arg)
{
- struct rio_dev *rdev = arg;
+ struct rio_mport *mport = arg;
/* Check that DMA device belongs to the right MPORT */
- return (rdev->net->hport ==
- container_of(chan->device, struct rio_mport, dma));
+ return mport == container_of(chan->device, struct rio_mport, dma);
}
/**
- * rio_request_dma - request RapidIO capable DMA channel that supports
- * specified target RapidIO device.
- * @rdev: RIO device control structure
+ * rio_request_mport_dma - request RapidIO capable DMA channel associated
+ * with specified local RapidIO mport device.
+ * @mport: RIO mport to perform DMA data transfers
*
* Returns pointer to allocated DMA channel or NULL if failed.
*/
-struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+struct dma_chan *rio_request_mport_dma(struct rio_mport *mport)
{
dma_cap_mask_t mask;
- struct dma_chan *dchan;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- dchan = dma_request_channel(mask, rio_chan_filter, rdev);
+ return dma_request_channel(mask, rio_chan_filter, mport);
+}
+EXPORT_SYMBOL_GPL(rio_request_mport_dma);
- return dchan;
+/**
+ * rio_request_dma - request RapidIO capable DMA channel that supports
+ * specified target RapidIO device.
+ * @rdev: RIO device associated with DMA transfer
+ *
+ * Returns pointer to allocated DMA channel or NULL if failed.
+ */
+struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+{
+ return rio_request_mport_dma(rdev->net->hport);
}
EXPORT_SYMBOL_GPL(rio_request_dma);
@@ -1547,10 +1556,10 @@ void rio_release_dma(struct dma_chan *dchan)
EXPORT_SYMBOL_GPL(rio_release_dma);
/**
- * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ * rio_dma_prep_xfer - RapidIO specific wrapper
* for device_prep_slave_sg callback defined by DMAENGINE.
- * @rdev: RIO device control structure
* @dchan: DMA channel to configure
+ * @destid: target RapidIO device destination ID
* @data: RIO specific data descriptor
* @direction: DMA data transfer direction (TO or FROM the device)
* @flags: dmaengine defined flags
@@ -1560,11 +1569,10 @@ EXPORT_SYMBOL_GPL(rio_release_dma);
* target RIO device.
* Returns pointer to DMA transaction descriptor or NULL if failed.
*/
-struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
- struct dma_chan *dchan, struct rio_dma_data *data,
+struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
+ u16 destid, struct rio_dma_data *data,
enum dma_transfer_direction direction, unsigned long flags)
{
- struct dma_async_tx_descriptor *txd = NULL;
struct rio_dma_ext rio_ext;
if (dchan->device->device_prep_slave_sg == NULL) {
@@ -1572,15 +1580,35 @@ struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
return NULL;
}
- rio_ext.destid = rdev->destid;
+ rio_ext.destid = destid;
rio_ext.rio_addr_u = data->rio_addr_u;
rio_ext.rio_addr = data->rio_addr;
rio_ext.wr_type = data->wr_type;
- txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
- direction, flags, &rio_ext);
+ return dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
+ direction, flags, &rio_ext);
+}
+EXPORT_SYMBOL_GPL(rio_dma_prep_xfer);
- return txd;
+/**
+ * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ * for device_prep_slave_sg callback defined by DMAENGINE.
+ * @rdev: RIO device control structure
+ * @dchan: DMA channel to configure
+ * @data: RIO specific data descriptor
+ * @direction: DMA data transfer direction (TO or FROM the device)
+ * @flags: dmaengine defined flags
+ *
+ * Initializes RapidIO capable DMA channel for the specified data transfer.
+ * Uses DMA channel private extension to pass information related to remote
+ * target RIO device.
+ * Returns pointer to DMA transaction descriptor or NULL if failed.
+ */
+struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
+ struct dma_chan *dchan, struct rio_dma_data *data,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags);
}
EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
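
The rio.c hunk above splits the old rdev-centric helpers into mport-based ones: rio_request_mport_dma() filters channels by mport, rio_dma_prep_xfer() takes an explicit destination ID, and the original rio_request_dma()/rio_dma_prep_slave_sg() remain as thin wrappers. The sketch below shows how a caller might use the new entry points; it assumes the caller already holds a valid mport, a destination ID and a DMA-mapped scatterlist, and that the matching declarations land in rio_drv.h as part of this series. Error handling is trimmed and the completion wait is elided.

/*
 * Illustrative caller of the new mport-based API; not part of the patch.
 * "mport", "destid" and the DMA-mapped "sgl"/"sg_len" are assumed to be
 * set up by the caller.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/rio_drv.h>
#include <linux/scatterlist.h>

static int xfer_to_remote(struct rio_mport *mport, u16 destid,
			  struct scatterlist *sgl, unsigned int sg_len,
			  u64 rio_addr)
{
	struct rio_dma_data data = {
		.sg		= sgl,
		.sg_len		= sg_len,
		.rio_addr	= rio_addr,
		.rio_addr_u	= 0,
		.wr_type	= RDW_DEFAULT,
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;

	chan = rio_request_mport_dma(mport);
	if (!chan)
		return -ENODEV;

	tx = rio_dma_prep_xfer(chan, destid, &data, DMA_MEM_TO_DEV,
			       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		rio_release_dma(chan);
		return -EIO;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	/* ... wait for completion via the cookie, then rio_release_dma(chan) ... */
	return 0;
}
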
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 2b7e9e220497..b16c53a8272f 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -31,6 +31,7 @@
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps14.h>
+#include <linux/mfd/samsung/s2mpu02.h>
struct s2mps11_info {
unsigned int rdev_num;
@@ -40,11 +41,15 @@ struct s2mps11_info {
int ramp_delay16;
int ramp_delay7810;
int ramp_delay9;
+
+ enum sec_device_type dev_type;
+
/*
- * One bit for each S2MPS14 regulator whether the suspend mode
+ * One bit for each S2MPS14/S2MPU02 regulator whether the suspend mode
* was enabled.
*/
- unsigned int s2mps14_suspend_state:30;
+ unsigned long long s2mps14_suspend_state:35;
+
/* Array of size rdev_num with GPIO-s for external sleep control */
int *ext_control_gpio;
};
@@ -415,12 +420,24 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
unsigned int val;
- if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
- val = S2MPS14_ENABLE_SUSPEND;
- else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
- val = S2MPS14_ENABLE_EXT_CONTROL;
- else
- val = rdev->desc->enable_mask;
+ switch (s2mps11->dev_type) {
+ case S2MPS14X:
+ if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ val = S2MPS14_ENABLE_SUSPEND;
+ else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
+ val = S2MPS14_ENABLE_EXT_CONTROL;
+ else
+ val = rdev->desc->enable_mask;
+ break;
+ case S2MPU02:
+ if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
+ val = S2MPU02_ENABLE_SUSPEND;
+ else
+ val = rdev->desc->enable_mask;
+ break;
+ default:
+ return -EINVAL;
+ };
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val);
@@ -429,12 +446,38 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
{
int ret;
- unsigned int val;
+ unsigned int val, state;
struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+ int rdev_id = rdev_get_id(rdev);
- /* LDO3 should be always on and does not support suspend mode */
- if (rdev_get_id(rdev) == S2MPS14_LDO3)
- return 0;
+ /* Below LDO should be always on or does not support suspend mode. */
+ switch (s2mps11->dev_type) {
+ case S2MPS14X:
+ switch (rdev_id) {
+ case S2MPS14_LDO3:
+ return 0;
+ default:
+ state = S2MPS14_ENABLE_SUSPEND;
+ break;
+ };
+ break;
+ case S2MPU02:
+ switch (rdev_id) {
+ case S2MPU02_LDO13:
+ case S2MPU02_LDO14:
+ case S2MPU02_LDO15:
+ case S2MPU02_LDO17:
+ case S2MPU02_BUCK7:
+ state = S2MPU02_DISABLE_SUSPEND;
+ break;
+ default:
+ state = S2MPU02_ENABLE_SUSPEND;
+ break;
+ };
+ break;
+ default:
+ return -EINVAL;
+ };
ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
if (ret < 0)
@@ -452,7 +495,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
return 0;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask, S2MPS14_ENABLE_SUSPEND);
+ rdev->desc->enable_mask, state);
}
static struct regulator_ops s2mps14_reg_ops = {
@@ -605,8 +648,7 @@ static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
}
static int s2mps11_pmic_dt_parse(struct platform_device *pdev,
- struct of_regulator_match *rdata, struct s2mps11_info *s2mps11,
- enum sec_device_type dev_type)
+ struct of_regulator_match *rdata, struct s2mps11_info *s2mps11)
{
struct device_node *reg_np;
@@ -617,7 +659,7 @@ static int s2mps11_pmic_dt_parse(struct platform_device *pdev,
}
of_regulator_match(&pdev->dev, reg_np, rdata, s2mps11->rdev_num);
- if (dev_type == S2MPS14X)
+ if (s2mps11->dev_type == S2MPS14X)
s2mps14_pmic_dt_parse_ext_control_gpio(pdev, rdata, s2mps11);
of_node_put(reg_np);
@@ -625,6 +667,238 @@ static int s2mps11_pmic_dt_parse(struct platform_device *pdev,
return 0;
}
+static int s2mpu02_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int ramp_val, ramp_shift, ramp_reg;
+
+ switch (rdev_get_id(rdev)) {
+ case S2MPU02_BUCK1:
+ ramp_shift = S2MPU02_BUCK1_RAMP_SHIFT;
+ break;
+ case S2MPU02_BUCK2:
+ ramp_shift = S2MPU02_BUCK2_RAMP_SHIFT;
+ break;
+ case S2MPU02_BUCK3:
+ ramp_shift = S2MPU02_BUCK3_RAMP_SHIFT;
+ break;
+ case S2MPU02_BUCK4:
+ ramp_shift = S2MPU02_BUCK4_RAMP_SHIFT;
+ break;
+ default:
+ return 0;
+ }
+ ramp_reg = S2MPU02_REG_RAMP1;
+ ramp_val = get_ramp_delay(ramp_delay);
+
+ return regmap_update_bits(rdev->regmap, ramp_reg,
+ S2MPU02_BUCK1234_RAMP_MASK << ramp_shift,
+ ramp_val << ramp_shift);
+}
+
+static struct regulator_ops s2mpu02_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = s2mps14_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_disable = s2mps14_regulator_set_suspend_disable,
+};
+
+static struct regulator_ops s2mpu02_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = s2mps14_regulator_enable,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_suspend_disable = s2mps14_regulator_set_suspend_disable,
+ .set_ramp_delay = s2mpu02_set_ramp_delay,
+};
+
+#define regulator_desc_s2mpu02_ldo1(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPU02_LDO##num, \
+ .ops = &s2mpu02_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU02_LDO_MIN_900MV, \
+ .uV_step = S2MPU02_LDO_STEP_12_5MV, \
+ .linear_min_sel = S2MPU02_LDO_GROUP1_START_SEL, \
+ .n_voltages = S2MPU02_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPU02_REG_L1CTRL, \
+ .vsel_mask = S2MPU02_LDO_VSEL_MASK, \
+ .enable_reg = S2MPU02_REG_L1CTRL, \
+ .enable_mask = S2MPU02_ENABLE_MASK \
+}
+#define regulator_desc_s2mpu02_ldo2(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPU02_LDO##num, \
+ .ops = &s2mpu02_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU02_LDO_MIN_1050MV, \
+ .uV_step = S2MPU02_LDO_STEP_25MV, \
+ .linear_min_sel = S2MPU02_LDO_GROUP2_START_SEL, \
+ .n_voltages = S2MPU02_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPU02_REG_L2CTRL1, \
+ .vsel_mask = S2MPU02_LDO_VSEL_MASK, \
+ .enable_reg = S2MPU02_REG_L2CTRL1, \
+ .enable_mask = S2MPU02_ENABLE_MASK \
+}
+#define regulator_desc_s2mpu02_ldo3(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPU02_LDO##num, \
+ .ops = &s2mpu02_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU02_LDO_MIN_900MV, \
+ .uV_step = S2MPU02_LDO_STEP_12_5MV, \
+ .linear_min_sel = S2MPU02_LDO_GROUP1_START_SEL, \
+ .n_voltages = S2MPU02_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPU02_REG_L3CTRL + num - 3, \
+ .vsel_mask = S2MPU02_LDO_VSEL_MASK, \
+ .enable_reg = S2MPU02_REG_L3CTRL + num - 3, \
+ .enable_mask = S2MPU02_ENABLE_MASK \
+}
+#define regulator_desc_s2mpu02_ldo4(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPU02_LDO##num, \
+ .ops = &s2mpu02_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU02_LDO_MIN_1050MV, \
+ .uV_step = S2MPU02_LDO_STEP_25MV, \
+ .linear_min_sel = S2MPU02_LDO_GROUP2_START_SEL, \
+ .n_voltages = S2MPU02_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPU02_REG_L3CTRL + num - 3, \
+ .vsel_mask = S2MPU02_LDO_VSEL_MASK, \
+ .enable_reg = S2MPU02_REG_L3CTRL + num - 3, \
+ .enable_mask = S2MPU02_ENABLE_MASK \
+}
+#define regulator_desc_s2mpu02_ldo5(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPU02_LDO##num, \
+ .ops = &s2mpu02_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU02_LDO_MIN_1600MV, \
+ .uV_step = S2MPU02_LDO_STEP_50MV, \
+ .linear_min_sel = S2MPU02_LDO_GROUP3_START_SEL, \
+ .n_voltages = S2MPU02_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPU02_REG_L3CTRL + num - 3, \
+ .vsel_mask = S2MPU02_LDO_VSEL_MASK, \
+ .enable_reg = S2MPU02_REG_L3CTRL + num - 3, \
+ .enable_mask = S2MPU02_ENABLE_MASK \
+}
+
+#define regulator_desc_s2mpu02_buck1234(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPU02_BUCK##num, \
+ .ops = &s2mpu02_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU02_BUCK1234_MIN_600MV, \
+ .uV_step = S2MPU02_BUCK1234_STEP_6_25MV, \
+ .n_voltages = S2MPU02_BUCK_N_VOLTAGES, \
+ .linear_min_sel = S2MPU02_BUCK1234_START_SEL, \
+ .ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPU02_REG_B1CTRL2 + (num - 1) * 2, \
+ .vsel_mask = S2MPU02_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPU02_REG_B1CTRL1 + (num - 1) * 2, \
+ .enable_mask = S2MPU02_ENABLE_MASK \
+}
+#define regulator_desc_s2mpu02_buck5(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPU02_BUCK##num, \
+ .ops = &s2mpu02_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU02_BUCK5_MIN_1081_25MV, \
+ .uV_step = S2MPU02_BUCK5_STEP_6_25MV, \
+ .n_voltages = S2MPU02_BUCK_N_VOLTAGES, \
+ .linear_min_sel = S2MPU02_BUCK5_START_SEL, \
+ .ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPU02_REG_B5CTRL2, \
+ .vsel_mask = S2MPU02_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPU02_REG_B5CTRL1, \
+ .enable_mask = S2MPU02_ENABLE_MASK \
+}
+#define regulator_desc_s2mpu02_buck6(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPU02_BUCK##num, \
+ .ops = &s2mpu02_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU02_BUCK6_MIN_1700MV, \
+ .uV_step = S2MPU02_BUCK6_STEP_2_50MV, \
+ .n_voltages = S2MPU02_BUCK_N_VOLTAGES, \
+ .linear_min_sel = S2MPU02_BUCK6_START_SEL, \
+ .ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPU02_REG_B6CTRL2, \
+ .vsel_mask = S2MPU02_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPU02_REG_B6CTRL1, \
+ .enable_mask = S2MPU02_ENABLE_MASK \
+}
+#define regulator_desc_s2mpu02_buck7(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPU02_BUCK##num, \
+ .ops = &s2mpu02_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPU02_BUCK7_MIN_900MV, \
+ .uV_step = S2MPU02_BUCK7_STEP_6_25MV, \
+ .n_voltages = S2MPU02_BUCK_N_VOLTAGES, \
+ .linear_min_sel = S2MPU02_BUCK7_START_SEL, \
+ .ramp_delay = S2MPU02_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPU02_REG_B7CTRL2, \
+ .vsel_mask = S2MPU02_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPU02_REG_B7CTRL1, \
+ .enable_mask = S2MPU02_ENABLE_MASK \
+}
+
+static const struct regulator_desc s2mpu02_regulators[] = {
+ regulator_desc_s2mpu02_ldo1(1),
+ regulator_desc_s2mpu02_ldo2(2),
+ regulator_desc_s2mpu02_ldo4(3),
+ regulator_desc_s2mpu02_ldo5(4),
+ regulator_desc_s2mpu02_ldo4(5),
+ regulator_desc_s2mpu02_ldo3(6),
+ regulator_desc_s2mpu02_ldo3(7),
+ regulator_desc_s2mpu02_ldo4(8),
+ regulator_desc_s2mpu02_ldo5(9),
+ regulator_desc_s2mpu02_ldo3(10),
+ regulator_desc_s2mpu02_ldo4(11),
+ regulator_desc_s2mpu02_ldo5(12),
+ regulator_desc_s2mpu02_ldo5(13),
+ regulator_desc_s2mpu02_ldo5(14),
+ regulator_desc_s2mpu02_ldo5(15),
+ regulator_desc_s2mpu02_ldo5(16),
+ regulator_desc_s2mpu02_ldo4(17),
+ regulator_desc_s2mpu02_ldo5(18),
+ regulator_desc_s2mpu02_ldo3(19),
+ regulator_desc_s2mpu02_ldo4(20),
+ regulator_desc_s2mpu02_ldo5(21),
+ regulator_desc_s2mpu02_ldo5(22),
+ regulator_desc_s2mpu02_ldo5(23),
+ regulator_desc_s2mpu02_ldo4(24),
+ regulator_desc_s2mpu02_ldo5(25),
+ regulator_desc_s2mpu02_ldo4(26),
+ regulator_desc_s2mpu02_ldo5(27),
+ regulator_desc_s2mpu02_ldo5(28),
+ regulator_desc_s2mpu02_buck1234(1),
+ regulator_desc_s2mpu02_buck1234(2),
+ regulator_desc_s2mpu02_buck1234(3),
+ regulator_desc_s2mpu02_buck1234(4),
+ regulator_desc_s2mpu02_buck5(5),
+ regulator_desc_s2mpu02_buck6(6),
+ regulator_desc_s2mpu02_buck7(7),
+};
+
static int s2mps11_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
@@ -634,15 +908,14 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
struct s2mps11_info *s2mps11;
int i, ret = 0;
const struct regulator_desc *regulators;
- enum sec_device_type dev_type;
s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
GFP_KERNEL);
if (!s2mps11)
return -ENOMEM;
- dev_type = platform_get_device_id(pdev)->driver_data;
- switch (dev_type) {
+ s2mps11->dev_type = platform_get_device_id(pdev)->driver_data;
+ switch (s2mps11->dev_type) {
case S2MPS11X:
s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
regulators = s2mps11_regulators;
@@ -651,8 +924,13 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
regulators = s2mps14_regulators;
break;
+ case S2MPU02:
+ s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
+ regulators = s2mpu02_regulators;
+ break;
default:
- dev_err(&pdev->dev, "Invalid device type: %u\n", dev_type);
+ dev_err(&pdev->dev, "Invalid device type: %u\n",
+ s2mps11->dev_type);
return -EINVAL;
};
@@ -686,7 +964,7 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
for (i = 0; i < s2mps11->rdev_num; i++)
rdata[i].name = regulators[i].name;
- ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11, dev_type);
+ ret = s2mps11_pmic_dt_parse(pdev, rdata, s2mps11);
if (ret)
goto out;
@@ -739,6 +1017,7 @@ out:
static const struct platform_device_id s2mps11_pmic_id[] = {
{ "s2mps11-pmic", S2MPS11X},
{ "s2mps14-pmic", S2MPS14X},
+ { "s2mpu02-pmic", S2MPU02},
{ },
};
MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
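
The regulator changes above widen the suspend-state bookkeeping from 30 to 35 bits (one bit per S2MPS14/S2MPU02 regulator) and route the enable/suspend decisions through the new dev_type field. With 35 regulators the bit index can exceed 31, so a mask for the upper entries needs a 64-bit constant rather than a plain int shift. The helpers below are a hypothetical illustration of that 64-bit-safe form; they are not part of the patch and assume they would live alongside the code in s2mps11.c.

/*
 * Hypothetical helpers, not in the patch: with regulator IDs that can
 * exceed 31, the suspend-state mask must be built as a 64-bit value so
 * the shift does not truncate on a 32-bit int.
 */
static inline bool s2mps11_reg_suspended(const struct s2mps11_info *s2mps11,
					 int rdev_id)
{
	return s2mps11->s2mps14_suspend_state & (1ULL << rdev_id);
}

static inline void s2mps11_mark_suspended(struct s2mps11_info *s2mps11,
					  int rdev_id)
{
	s2mps11->s2mps14_suspend_state |= 1ULL << rdev_id;
}
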
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 0754f5c7cb3b..a168e96142b9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -373,6 +373,14 @@ config RTC_DRV_PCF8563
This driver can also be built as a module. If so, the module
will be called rtc-pcf8563.
+config RTC_DRV_PCF85063
+ tristate "NXP PCF85063"
+ help
+ If you say yes here you get support for the PCF85063 RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-pcf85063.
+
config RTC_DRV_PCF8583
tristate "Philips PCF8583"
help
@@ -760,6 +768,15 @@ config RTC_DRV_DS1742
This driver can also be built as a module. If so, the module
will be called rtc-ds1742.
+config RTC_DRV_DS2404
+ tristate "Maxim/Dallas DS2404"
+ help
+ If you say yes here you get support for the
+ Dallas DS2404 RTC chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-ds2404.
+
config RTC_DRV_DA9052
tristate "Dialog DA9052/DA9053 RTC"
depends on PMIC_DA9052
@@ -789,7 +806,7 @@ config RTC_DRV_DA9063
config RTC_DRV_EFI
tristate "EFI RTC"
- depends on IA64
+ depends on EFI
help
If you say yes here you will get support for the EFI
Real Time Clock.
@@ -873,15 +890,6 @@ config RTC_DRV_V3020
This driver can also be built as a module. If so, the module
will be called rtc-v3020.
-config RTC_DRV_DS2404
- tristate "Dallas DS2404"
- help
- If you say yes here you get support for the
- Dallas DS2404 RTC chip.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-ds2404.
-
config RTC_DRV_WM831X
tristate "Wolfson Microelectronics WM831x RTC"
depends on MFD_WM831X
@@ -1349,6 +1357,7 @@ config RTC_DRV_SIRFSOC
config RTC_DRV_MOXART
tristate "MOXA ART RTC"
+ depends on ARCH_MOXART || COMPILE_TEST
help
If you say yes here you get support for the MOXA ART
RTC module.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 70347d041d10..56f061c7c815 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -10,6 +10,10 @@ obj-$(CONFIG_RTC_SYSTOHC) += systohc.o
obj-$(CONFIG_RTC_CLASS) += rtc-core.o
rtc-core-y := class.o interface.o
+ifdef CONFIG_RTC_DRV_EFI
+rtc-core-y += rtc-efi-platform.o
+endif
+
rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o
rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
@@ -93,6 +97,7 @@ obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o
obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o
obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o
+obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o
obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o
obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o
obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 589351ef75d0..38e26be705be 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -53,6 +53,7 @@ static int rtc_suspend(struct device *dev)
struct rtc_device *rtc = to_rtc_device(dev);
struct rtc_time tm;
struct timespec delta, delta_delta;
+ int err;
if (has_persistent_clock())
return 0;
@@ -61,7 +62,12 @@ static int rtc_suspend(struct device *dev)
return 0;
/* snapshot the current RTC and system time at suspend*/
- rtc_read_time(rtc, &tm);
+ err = rtc_read_time(rtc, &tm);
+ if (err < 0) {
+ pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
+ return 0;
+ }
+
getnstimeofday(&old_system);
rtc_tm_to_time(&tm, &old_rtc.tv_sec);
@@ -94,6 +100,7 @@ static int rtc_resume(struct device *dev)
struct rtc_time tm;
struct timespec new_system, new_rtc;
struct timespec sleep_time;
+ int err;
if (has_persistent_clock())
return 0;
@@ -104,7 +111,12 @@ static int rtc_resume(struct device *dev)
/* snapshot the current rtc and system time at resume */
getnstimeofday(&new_system);
- rtc_read_time(rtc, &tm);
+ err = rtc_read_time(rtc, &tm);
+ if (err < 0) {
+ pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
+ return 0;
+ }
+
if (rtc_valid_tm(&tm) != 0) {
pr_debug("%s: bogus resume time\n", dev_name(&rtc->dev));
return 0;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 5813fa52c3d4..5b2717f5dafa 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -348,6 +348,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
/* Make sure we're not setting alarms in the past */
err = __rtc_read_time(rtc, &tm);
+ if (err)
+ return err;
rtc_tm_to_time(&tm, &now);
if (scheduled <= now)
return -ETIME;
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c
index ed526a192ce0..fd25e2374d4e 100644
--- a/drivers/rtc/rtc-au1xxx.c
+++ b/drivers/rtc/rtc-au1xxx.c
@@ -32,7 +32,7 @@ static int au1xtoy_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
unsigned long t;
- t = au_readl(SYS_TOYREAD);
+ t = alchemy_rdsys(AU1000_SYS_TOYREAD);
rtc_time_to_tm(t, tm);
@@ -45,13 +45,12 @@ static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_tm_to_time(tm, &t);
- au_writel(t, SYS_TOYWRITE);
- au_sync();
+ alchemy_wrsys(t, AU1000_SYS_TOYWRITE);
/* wait for the pending register write to succeed. This can
* take up to 6 seconds...
*/
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S)
+ while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C0S)
msleep(1);
return 0;
@@ -68,7 +67,7 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
unsigned long t;
int ret;
- t = au_readl(SYS_COUNTER_CNTRL);
+ t = alchemy_rdsys(AU1000_SYS_CNTRCTRL);
if (!(t & CNTR_OK)) {
dev_err(&pdev->dev, "counters not working; aborting.\n");
ret = -ENODEV;
@@ -78,10 +77,10 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
ret = -ETIMEDOUT;
/* set counter0 tickrate to 1Hz if necessary */
- if (au_readl(SYS_TOYTRIM) != 32767) {
+ if (alchemy_rdsys(AU1000_SYS_TOYTRIM) != 32767) {
/* wait until hardware gives access to TRIM register */
t = 0x00100000;
- while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S) && --t)
+ while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_T0S) && --t)
msleep(1);
if (!t) {
@@ -93,12 +92,11 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
}
/* set 1Hz TOY tick rate */
- au_writel(32767, SYS_TOYTRIM);
- au_sync();
+ alchemy_wrsys(32767, AU1000_SYS_TOYTRIM);
}
/* wait until the hardware allows writes to the counter reg */
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S)
+ while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C0S)
msleep(1);
rtcdev = devm_rtc_device_register(&pdev->dev, "rtc-au1xxx",
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 595393098b09..731ed1a97f59 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -29,6 +29,8 @@
#define YEARS_FROM_DA9063(year) ((year) + 100)
#define MONTHS_FROM_DA9063(month) ((month) - 1)
+#define RTC_ALARM_DATA_LEN (DA9063_AD_REG_ALARM_Y - DA9063_AD_REG_ALARM_MI + 1)
+
#define RTC_DATA_LEN (DA9063_REG_COUNT_Y - DA9063_REG_COUNT_S + 1)
#define RTC_SEC 0
#define RTC_MIN 1
@@ -42,6 +44,10 @@ struct da9063_rtc {
struct da9063 *hw;
struct rtc_time alarm_time;
bool rtc_sync;
+ int alarm_year;
+ int alarm_start;
+ int alarm_len;
+ int data_start;
};
static void da9063_data_to_tm(u8 *data, struct rtc_time *tm)
@@ -83,7 +89,7 @@ static int da9063_rtc_stop_alarm(struct device *dev)
{
struct da9063_rtc *rtc = dev_get_drvdata(dev);
- return regmap_update_bits(rtc->hw->regmap, DA9063_REG_ALARM_Y,
+ return regmap_update_bits(rtc->hw->regmap, rtc->alarm_year,
DA9063_ALARM_ON, 0);
}
@@ -91,7 +97,7 @@ static int da9063_rtc_start_alarm(struct device *dev)
{
struct da9063_rtc *rtc = dev_get_drvdata(dev);
- return regmap_update_bits(rtc->hw->regmap, DA9063_REG_ALARM_Y,
+ return regmap_update_bits(rtc->hw->regmap, rtc->alarm_year,
DA9063_ALARM_ON, DA9063_ALARM_ON);
}
@@ -151,8 +157,9 @@ static int da9063_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
int ret;
unsigned int val;
- ret = regmap_bulk_read(rtc->hw->regmap, DA9063_REG_ALARM_S,
- &data[RTC_SEC], RTC_DATA_LEN);
+ data[RTC_SEC] = 0;
+ ret = regmap_bulk_read(rtc->hw->regmap, rtc->alarm_start,
+ &data[rtc->data_start], rtc->alarm_len);
if (ret < 0)
return ret;
@@ -186,14 +193,14 @@ static int da9063_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
}
- ret = regmap_bulk_write(rtc->hw->regmap, DA9063_REG_ALARM_S,
- data, RTC_DATA_LEN);
+ ret = regmap_bulk_write(rtc->hw->regmap, rtc->alarm_start,
+ &data[rtc->data_start], rtc->alarm_len);
if (ret < 0) {
dev_err(dev, "Failed to write alarm: %d\n", ret);
return ret;
}
- rtc->alarm_time = alrm->time;
+ da9063_data_to_tm(data, &rtc->alarm_time);
if (alrm->enabled) {
ret = da9063_rtc_start_alarm(dev);
@@ -218,7 +225,7 @@ static irqreturn_t da9063_alarm_event(int irq, void *data)
{
struct da9063_rtc *rtc = data;
- regmap_update_bits(rtc->hw->regmap, DA9063_REG_ALARM_Y,
+ regmap_update_bits(rtc->hw->regmap, rtc->alarm_year,
DA9063_ALARM_ON, 0);
rtc->rtc_sync = true;
@@ -257,7 +264,23 @@ static int da9063_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = regmap_update_bits(da9063->regmap, DA9063_REG_ALARM_S,
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ if (da9063->variant_code == PMIC_DA9063_AD) {
+ rtc->alarm_year = DA9063_AD_REG_ALARM_Y;
+ rtc->alarm_start = DA9063_AD_REG_ALARM_MI;
+ rtc->alarm_len = RTC_ALARM_DATA_LEN;
+ rtc->data_start = RTC_MIN;
+ } else {
+ rtc->alarm_year = DA9063_BB_REG_ALARM_Y;
+ rtc->alarm_start = DA9063_BB_REG_ALARM_S;
+ rtc->alarm_len = RTC_DATA_LEN;
+ rtc->data_start = RTC_SEC;
+ }
+
+ ret = regmap_update_bits(da9063->regmap, rtc->alarm_start,
DA9063_ALARM_STATUS_TICK | DA9063_ALARM_STATUS_ALARM,
0);
if (ret < 0) {
@@ -265,7 +288,7 @@ static int da9063_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = regmap_update_bits(da9063->regmap, DA9063_REG_ALARM_S,
+ ret = regmap_update_bits(da9063->regmap, rtc->alarm_start,
DA9063_ALARM_STATUS_ALARM,
DA9063_ALARM_STATUS_ALARM);
if (ret < 0) {
@@ -273,25 +296,22 @@ static int da9063_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = regmap_update_bits(da9063->regmap, DA9063_REG_ALARM_Y,
+ ret = regmap_update_bits(da9063->regmap, rtc->alarm_year,
DA9063_TICK_ON, 0);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to disable TICKs\n");
goto err;
}
- ret = regmap_bulk_read(da9063->regmap, DA9063_REG_ALARM_S,
- data, RTC_DATA_LEN);
+ data[RTC_SEC] = 0;
+ ret = regmap_bulk_read(da9063->regmap, rtc->alarm_start,
+ &data[rtc->data_start], rtc->alarm_len);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to read initial alarm data: %d\n",
ret);
goto err;
}
- rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
- if (!rtc)
- return -ENOMEM;
-
platform_set_drvdata(pdev, rtc);
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
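
The da9063 probe above picks a register layout per silicon variant: the AD variant's alarm block starts at the minutes register (it has no alarm-seconds), so transfers land at an offset inside the shared data[] buffer and the seconds slot is pre-zeroed. A minimal illustrative sketch of that offset scheme follows; the constants and the memcpy stand in for the real DA9063 register defines and regmap_bulk_read(), they are not taken from the patch.

    /* Illustrative only: register numbers below are made up and the memcpy
     * plays the role of regmap_bulk_read(); only the offset bookkeeping
     * mirrors the driver code above. */
    #include <stdio.h>
    #include <string.h>

    #define RTC_SEC            0
    #define RTC_MIN            1
    #define RTC_DATA_LEN       6   /* seconds .. year                         */
    #define RTC_ALARM_DATA_LEN 5   /* minutes .. year (AD has no alarm-secs)  */

    struct layout {
            int alarm_start;       /* first alarm register to transfer        */
            int alarm_len;         /* number of registers transferred         */
            int data_start;        /* index in data[] where the transfer lands */
    };

    static void read_alarm(const struct layout *l, const unsigned char *regs,
                           unsigned char data[RTC_DATA_LEN])
    {
            data[RTC_SEC] = 0;     /* never filled on the AD variant */
            memcpy(&data[l->data_start], &regs[l->alarm_start], l->alarm_len);
    }

    int main(void)
    {
            unsigned char regs[16] = { [3] = 30, [4] = 12 }; /* fake min/hour */
            unsigned char data[RTC_DATA_LEN];
            struct layout ad = { .alarm_start = 3,
                                 .alarm_len = RTC_ALARM_DATA_LEN,
                                 .data_start = RTC_MIN };

            read_alarm(&ad, regs, data);
            printf("sec=%u min=%u hour=%u\n", data[0], data[1], data[2]);
            return 0;
    }
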
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index c3719189dd96..ae9f997223b1 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -4,6 +4,7 @@
* Real Time Clock
*
* Author : Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>
+ * Ankur Srivastava <sankurece@gmail.com> : DS1343 Nvram Support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -45,6 +46,9 @@
#define DS1343_CONTROL_REG 0x0F
#define DS1343_STATUS_REG 0x10
#define DS1343_TRICKLE_REG 0x11
+#define DS1343_NVRAM 0x20
+
+#define DS1343_NVRAM_LEN 96
/* DS1343 Control Registers bits */
#define DS1343_EOSC 0x80
@@ -149,6 +153,64 @@ static ssize_t ds1343_store_glitchfilter(struct device *dev,
static DEVICE_ATTR(glitch_filter, S_IRUGO | S_IWUSR, ds1343_show_glitchfilter,
ds1343_store_glitchfilter);
+static ssize_t ds1343_nvram_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ int ret;
+ unsigned char address;
+ struct device *dev = kobj_to_dev(kobj);
+ struct ds1343_priv *priv = dev_get_drvdata(dev);
+
+ if (unlikely(!count))
+ return count;
+
+ if ((count + off) > DS1343_NVRAM_LEN)
+ count = DS1343_NVRAM_LEN - off;
+
+ address = DS1343_NVRAM + off;
+
+ ret = regmap_bulk_write(priv->map, address, buf, count);
+ if (ret < 0)
+ dev_err(&priv->spi->dev, "Error in nvram write %d\n", ret);
+
+ return (ret < 0) ? ret : count;
+}
+
+
+static ssize_t ds1343_nvram_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ int ret;
+ unsigned char address;
+ struct device *dev = kobj_to_dev(kobj);
+ struct ds1343_priv *priv = dev_get_drvdata(dev);
+
+ if (unlikely(!count))
+ return count;
+
+ if ((count + off) > DS1343_NVRAM_LEN)
+ count = DS1343_NVRAM_LEN - off;
+
+ address = DS1343_NVRAM + off;
+
+ ret = regmap_bulk_read(priv->map, address, buf, count);
+ if (ret < 0)
+ dev_err(&priv->spi->dev, "Error in nvram read %d\n", ret);
+
+ return (ret < 0) ? ret : count;
+}
+
+
+static struct bin_attribute nvram_attr = {
+ .attr.name = "nvram",
+ .attr.mode = S_IRUGO | S_IWUSR,
+ .read = ds1343_nvram_read,
+ .write = ds1343_nvram_write,
+ .size = DS1343_NVRAM_LEN,
+};
+
static ssize_t ds1343_show_alarmstatus(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -274,12 +336,16 @@ static int ds1343_sysfs_register(struct device *dev)
if (err)
goto error1;
+ err = device_create_bin_file(dev, &nvram_attr);
+ if (err)
+ goto error2;
+
if (priv->irq <= 0)
return err;
err = device_create_file(dev, &dev_attr_alarm_mode);
if (err)
- goto error2;
+ goto error3;
err = device_create_file(dev, &dev_attr_alarm_status);
if (!err)
@@ -287,6 +353,9 @@ static int ds1343_sysfs_register(struct device *dev)
device_remove_file(dev, &dev_attr_alarm_mode);
+error3:
+ device_remove_bin_file(dev, &nvram_attr);
+
error2:
device_remove_file(dev, &dev_attr_trickle_charger);
@@ -302,6 +371,7 @@ static void ds1343_sysfs_unregister(struct device *dev)
device_remove_file(dev, &dev_attr_glitch_filter);
device_remove_file(dev, &dev_attr_trickle_charger);
+ device_remove_bin_file(dev, &nvram_attr);
if (priv->irq <= 0)
return;
@@ -684,6 +754,7 @@ static struct spi_driver ds1343_driver = {
module_spi_driver(ds1343_driver);
MODULE_DESCRIPTION("DS1343 RTC SPI Driver");
-MODULE_AUTHOR("Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>");
+MODULE_AUTHOR("Raghavendra Chandra Ganiga <ravi23ganiga@gmail.com>,"
+ "Ankur Srivastava <sankurece@gmail.com>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DS1343_DRV_VERSION);
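
The ds1343 change above exposes the chip's 96 bytes of NVRAM as a sysfs binary attribute named "nvram" on the SPI device. A minimal userspace sketch of accessing it is shown below; the /sys path is an assumption (the actual spiX.Y name depends on the board), only the attribute name and 96-byte size come from the patch.

    /* Userspace sketch; NVRAM_PATH is hypothetical and must be adjusted
     * to the real spiX.Y device on the target system. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NVRAM_PATH "/sys/bus/spi/devices/spi0.0/nvram"  /* assumption */
    #define NVRAM_LEN  96

    int main(void)
    {
            unsigned char buf[NVRAM_LEN];
            ssize_t n;
            int fd = open(NVRAM_PATH, O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            n = pread(fd, buf, sizeof(buf), 0);    /* read the whole NVRAM area */
            if (n > 0)
                    printf("read %zd bytes, first byte = 0x%02x\n", n, buf[0]);

            buf[0] ^= 0xff;                        /* toggle one byte, write it back */
            if (pwrite(fd, buf, 1, 0) != 1)
                    perror("pwrite");

            close(fd);
            return 0;
    }
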
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index c6b2191a4128..9822715db8ba 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -231,7 +231,7 @@ static struct platform_driver ds1742_rtc_driver = {
.driver = {
.name = "rtc-ds1742",
.owner = THIS_MODULE,
- .of_match_table = ds1742_rtc_of_match,
+ .of_match_table = of_match_ptr(ds1742_rtc_of_match),
},
};
diff --git a/drivers/rtc/rtc-efi-platform.c b/drivers/rtc/rtc-efi-platform.c
new file mode 100644
index 000000000000..b40fbe332af4
--- /dev/null
+++ b/drivers/rtc/rtc-efi-platform.c
@@ -0,0 +1,31 @@
+/*
+ * Moved from arch/ia64/kernel/time.c
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ * Copyright (C) 1999-2000 VA Linux Systems
+ * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/platform_device.h>
+
+static struct platform_device rtc_efi_dev = {
+ .name = "rtc-efi",
+ .id = -1,
+};
+
+static int __init rtc_init(void)
+{
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ if (platform_device_register(&rtc_efi_dev) < 0)
+ pr_err("unable to register rtc device...\n");
+
+ /* not necessarily an error */
+ return 0;
+}
+module_init(rtc_init);
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index c4c38431012e..8225b89de810 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/stringify.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -48,8 +49,8 @@ compute_wday(efi_time_t *eft)
int y;
int ndays = 0;
- if (eft->year < 1998) {
- pr_err("EFI year < 1998, invalid date\n");
+ if (eft->year < EFI_RTC_EPOCH) {
+ pr_err("EFI year < " __stringify(EFI_RTC_EPOCH) ", invalid date\n");
return -1;
}
@@ -78,19 +79,36 @@ convert_to_efi_time(struct rtc_time *wtime, efi_time_t *eft)
eft->timezone = EFI_UNSPECIFIED_TIMEZONE;
}
-static void
+static bool
convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
{
memset(wtime, 0, sizeof(*wtime));
+
+ if (eft->second >= 60)
+ return false;
wtime->tm_sec = eft->second;
+
+ if (eft->minute >= 60)
+ return false;
wtime->tm_min = eft->minute;
+
+ if (eft->hour >= 24)
+ return false;
wtime->tm_hour = eft->hour;
+
+ if (!eft->day || eft->day > 31)
+ return false;
wtime->tm_mday = eft->day;
+
+ if (!eft->month || eft->month > 12)
+ return false;
wtime->tm_mon = eft->month - 1;
wtime->tm_year = eft->year - 1900;
/* day of the week [0-6], Sunday=0 */
wtime->tm_wday = compute_wday(eft);
+ if (wtime->tm_wday < 0)
+ return false;
/* day in the year [1-365]*/
wtime->tm_yday = compute_yday(eft);
@@ -106,6 +124,8 @@ convert_from_efi_time(efi_time_t *eft, struct rtc_time *wtime)
default:
wtime->tm_isdst = -1;
}
+
+ return true;
}
static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
@@ -122,7 +142,8 @@ static int efi_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
if (status != EFI_SUCCESS)
return -EINVAL;
- convert_from_efi_time(&eft, &wkalrm->time);
+ if (!convert_from_efi_time(&eft, &wkalrm->time))
+ return -EIO;
return rtc_valid_tm(&wkalrm->time);
}
@@ -163,7 +184,8 @@ static int efi_read_time(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
- convert_from_efi_time(&eft, tm);
+ if (!convert_from_efi_time(&eft, tm))
+ return -EIO;
return rtc_valid_tm(tm);
}
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 03b891129428..aa55f081c505 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -17,6 +17,8 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#define DRV_VERSION "0.1"
@@ -271,6 +273,13 @@ static int isl12022_probe(struct i2c_client *client,
return PTR_ERR_OR_ZERO(isl12022->rtc);
}
+#ifdef CONFIG_OF
+static struct of_device_id isl12022_dt_match[] = {
+ { .compatible = "isl,isl12022" },
+ { },
+};
+#endif
+
static const struct i2c_device_id isl12022_id[] = {
{ "isl12022", 0 },
{ }
@@ -280,6 +289,9 @@ MODULE_DEVICE_TABLE(i2c, isl12022_id);
static struct i2c_driver isl12022_driver = {
.driver = {
.name = "rtc-isl12022",
+#ifdef CONFIG_OF
+ .of_match_table = of_match_ptr(isl12022_dt_match),
+#endif
},
.probe = isl12022_probe,
.id_table = isl12022_id,
diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
index 9efe118a28ba..d20a7f0786eb 100644
--- a/drivers/rtc/rtc-max77686.c
+++ b/drivers/rtc/rtc-max77686.c
@@ -492,16 +492,11 @@ static int max77686_rtc_init_reg(struct max77686_rtc_info *info)
return ret;
}
-static struct regmap_config max77686_rtc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
static int max77686_rtc_probe(struct platform_device *pdev)
{
struct max77686_dev *max77686 = dev_get_drvdata(pdev->dev.parent);
struct max77686_rtc_info *info;
- int ret, virq;
+ int ret;
dev_info(&pdev->dev, "%s\n", __func__);
@@ -514,14 +509,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
info->max77686 = max77686;
info->rtc = max77686->rtc;
- info->max77686->rtc_regmap = devm_regmap_init_i2c(info->max77686->rtc,
- &max77686_rtc_regmap_config);
- if (IS_ERR(info->max77686->rtc_regmap)) {
- ret = PTR_ERR(info->max77686->rtc_regmap);
- dev_err(info->max77686->dev, "Failed to allocate register map: %d\n",
- ret);
- return ret;
- }
+
platform_set_drvdata(pdev, info);
ret = max77686_rtc_init_reg(info);
@@ -550,15 +538,16 @@ static int max77686_rtc_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_rtc;
}
- virq = irq_create_mapping(max77686->irq_domain, MAX77686_RTCIRQ_RTCA1);
- if (!virq) {
+
+ info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
+ MAX77686_RTCIRQ_RTCA1);
+ if (!info->virq) {
ret = -ENXIO;
goto err_rtc;
}
- info->virq = virq;
- ret = devm_request_threaded_irq(&pdev->dev, virq, NULL,
- max77686_rtc_alarm_irq, 0, "rtc-alarm0", info);
+ ret = devm_request_threaded_irq(&pdev->dev, info->virq, NULL,
+ max77686_rtc_alarm_irq, 0, "rtc-alarm1", info);
if (ret < 0)
dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
info->virq, ret);
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
new file mode 100644
index 000000000000..6a12bf62c504
--- /dev/null
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -0,0 +1,204 @@
+/*
+ * An I2C driver for the PCF85063 RTC
+ * Copyright 2014 Rose Technology
+ *
+ * Author: Søren Andersen <san@rosetechnology.dk>
+ * Maintainers: http://www.nslu2-linux.org/
+ *
+ * based on the other drivers in this same directory.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/i2c.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+#include <linux/module.h>
+
+#define DRV_VERSION "0.0.1"
+
+#define PCF85063_REG_CTRL1 0x00 /* status */
+#define PCF85063_REG_CTRL2 0x01
+
+#define PCF85063_REG_SC 0x04 /* datetime */
+#define PCF85063_REG_MN 0x05
+#define PCF85063_REG_HR 0x06
+#define PCF85063_REG_DM 0x07
+#define PCF85063_REG_DW 0x08
+#define PCF85063_REG_MO 0x09
+#define PCF85063_REG_YR 0x0A
+
+#define PCF85063_MO_C 0x80 /* century */
+
+static struct i2c_driver pcf85063_driver;
+
+struct pcf85063 {
+ struct rtc_device *rtc;
+ int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
+ int voltage_low; /* indicates if a low_voltage was detected */
+};
+
+/*
+ * In the routines that deal directly with the pcf85063 hardware, we use
+ * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
+ */
+static int pcf85063_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+ struct pcf85063 *pcf85063 = i2c_get_clientdata(client);
+ unsigned char buf[13] = { PCF85063_REG_CTRL1 };
+ struct i2c_msg msgs[] = {
+ {/* setup read ptr */
+ .addr = client->addr,
+ .len = 1,
+ .buf = buf
+ },
+ {/* read status + date */
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 13,
+ .buf = buf
+ },
+ };
+
+ /* read registers */
+ if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
+ dev_err(&client->dev, "%s: read error\n", __func__);
+ return -EIO;
+ }
+
+ tm->tm_sec = bcd2bin(buf[PCF85063_REG_SC] & 0x7F);
+ tm->tm_min = bcd2bin(buf[PCF85063_REG_MN] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[PCF85063_REG_HR] & 0x3F); /* rtc hr 0-23 */
+ tm->tm_mday = bcd2bin(buf[PCF85063_REG_DM] & 0x3F);
+ tm->tm_wday = buf[PCF85063_REG_DW] & 0x07;
+ tm->tm_mon = bcd2bin(buf[PCF85063_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
+ tm->tm_year = bcd2bin(buf[PCF85063_REG_YR]);
+ if (tm->tm_year < 70)
+ tm->tm_year += 100; /* assume we are in 1970...2069 */
+ /* detect the polarity heuristically. see note above. */
+ pcf85063->c_polarity = (buf[PCF85063_REG_MO] & PCF85063_MO_C) ?
+ (tm->tm_year >= 100) : (tm->tm_year < 100);
+
+ /* the clock can give out invalid datetime, but we cannot return
+ * -EINVAL otherwise hwclock will refuse to set the time on bootup.
+ */
+ if (rtc_valid_tm(tm) < 0)
+ dev_err(&client->dev, "retrieved date/time is not valid.\n");
+
+ return 0;
+}
+
+static int pcf85063_set_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+ int i = 0, err = 0;
+ unsigned char buf[11];
+
+ /* Control & status */
+ buf[PCF85063_REG_CTRL1] = 0;
+ buf[PCF85063_REG_CTRL2] = 5;
+
+ /* hours, minutes and seconds */
+ buf[PCF85063_REG_SC] = bin2bcd(tm->tm_sec) & 0x7F;
+
+ buf[PCF85063_REG_MN] = bin2bcd(tm->tm_min);
+ buf[PCF85063_REG_HR] = bin2bcd(tm->tm_hour);
+
+ /* Day of month, 1 - 31 */
+ buf[PCF85063_REG_DM] = bin2bcd(tm->tm_mday);
+
+ /* Day, 0 - 6 */
+ buf[PCF85063_REG_DW] = tm->tm_wday & 0x07;
+
+ /* month, 1 - 12 */
+ buf[PCF85063_REG_MO] = bin2bcd(tm->tm_mon + 1);
+
+ /* year and century */
+ buf[PCF85063_REG_YR] = bin2bcd(tm->tm_year % 100);
+
+ /* write register's data */
+ for (i = 0; i < sizeof(buf); i++) {
+ unsigned char data[2] = { i, buf[i] };
+
+ err = i2c_master_send(client, data, sizeof(data));
+ if (err != sizeof(data)) {
+ dev_err(&client->dev, "%s: err=%d addr=%02x, data=%02x\n",
+ __func__, err, data[0], data[1]);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int pcf85063_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ return pcf85063_get_datetime(to_i2c_client(dev), tm);
+}
+
+static int pcf85063_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ return pcf85063_set_datetime(to_i2c_client(dev), tm);
+}
+
+static const struct rtc_class_ops pcf85063_rtc_ops = {
+ .read_time = pcf85063_rtc_read_time,
+ .set_time = pcf85063_rtc_set_time
+};
+
+static int pcf85063_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pcf85063 *pcf85063;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ pcf85063 = devm_kzalloc(&client->dev, sizeof(struct pcf85063),
+ GFP_KERNEL);
+ if (!pcf85063)
+ return -ENOMEM;
+
+ dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
+
+ i2c_set_clientdata(client, pcf85063);
+
+ pcf85063->rtc = devm_rtc_device_register(&client->dev,
+ pcf85063_driver.driver.name,
+ &pcf85063_rtc_ops, THIS_MODULE);
+
+ return PTR_ERR_OR_ZERO(pcf85063->rtc);
+}
+
+static const struct i2c_device_id pcf85063_id[] = {
+ { "pcf85063", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pcf85063_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pcf85063_of_match[] = {
+ { .compatible = "nxp,pcf85063" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pcf85063_of_match);
+#endif
+
+static struct i2c_driver pcf85063_driver = {
+ .driver = {
+ .name = "rtc-pcf85063",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pcf85063_of_match),
+ },
+ .probe = pcf85063_probe,
+ .id_table = pcf85063_id,
+};
+
+module_i2c_driver(pcf85063_driver);
+
+MODULE_AUTHOR("Søren Andersen <san@rosetechnology.dk>");
+MODULE_DESCRIPTION("PCF85063 RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 63b558c48196..5a197d9dc7e7 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -26,6 +26,8 @@
#define PCF8563_REG_ST1 0x00 /* status */
#define PCF8563_REG_ST2 0x01
+#define PCF8563_BIT_AIE (1 << 1)
+#define PCF8563_BIT_AF (1 << 3)
#define PCF8563_REG_SC 0x02 /* datetime */
#define PCF8563_REG_MN 0x03
@@ -36,9 +38,6 @@
#define PCF8563_REG_YR 0x08
#define PCF8563_REG_AMN 0x09 /* alarm */
-#define PCF8563_REG_AHR 0x0A
-#define PCF8563_REG_ADM 0x0B
-#define PCF8563_REG_ADW 0x0C
#define PCF8563_REG_CLKO 0x0D /* clock out */
#define PCF8563_REG_TMRC 0x0E /* timer control */
@@ -67,37 +66,133 @@ struct pcf8563 {
*/
int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
int voltage_low; /* indicates if a low_voltage was detected */
+
+ struct i2c_client *client;
};
-/*
- * In the routines that deal directly with the pcf8563 hardware, we use
- * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
- */
-static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+static int pcf8563_read_block_data(struct i2c_client *client, unsigned char reg,
+ unsigned char length, unsigned char *buf)
{
- struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
- unsigned char buf[13] = { PCF8563_REG_ST1 };
-
struct i2c_msg msgs[] = {
{/* setup read ptr */
.addr = client->addr,
.len = 1,
- .buf = buf
+ .buf = &reg,
},
- {/* read status + date */
+ {
.addr = client->addr,
.flags = I2C_M_RD,
- .len = 13,
+ .len = length,
.buf = buf
},
};
- /* read registers */
if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
dev_err(&client->dev, "%s: read error\n", __func__);
return -EIO;
}
+ return 0;
+}
+
+static int pcf8563_write_block_data(struct i2c_client *client,
+ unsigned char reg, unsigned char length,
+ unsigned char *buf)
+{
+ int i, err;
+
+ for (i = 0; i < length; i++) {
+ unsigned char data[2] = { reg + i, buf[i] };
+
+ err = i2c_master_send(client, data, sizeof(data));
+ if (err != sizeof(data)) {
+ dev_err(&client->dev,
+ "%s: err=%d addr=%02x, data=%02x\n",
+ __func__, err, data[0], data[1]);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int pcf8563_set_alarm_mode(struct i2c_client *client, bool on)
+{
+ unsigned char buf[2];
+ int err;
+
+ err = pcf8563_read_block_data(client, PCF8563_REG_ST2, 1, buf + 1);
+ if (err < 0)
+ return err;
+
+ if (on)
+ buf[1] |= PCF8563_BIT_AIE;
+ else
+ buf[1] &= ~PCF8563_BIT_AIE;
+
+ buf[1] &= ~PCF8563_BIT_AF;
+ buf[0] = PCF8563_REG_ST2;
+
+ err = pcf8563_write_block_data(client, PCF8563_REG_ST2, 1, buf + 1);
+ if (err < 0) {
+ dev_err(&client->dev, "%s: write error\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int pcf8563_get_alarm_mode(struct i2c_client *client, unsigned char *en,
+ unsigned char *pen)
+{
+ unsigned char buf;
+ int err;
+
+ err = pcf8563_read_block_data(client, PCF8563_REG_ST2, 1, &buf);
+ if (err)
+ return err;
+
+ if (en)
+ *en = !!(buf & PCF8563_BIT_AIE);
+ if (pen)
+ *pen = !!(buf & PCF8563_BIT_AF);
+
+ return 0;
+}
+
+static irqreturn_t pcf8563_irq(int irq, void *dev_id)
+{
+ struct pcf8563 *pcf8563 = i2c_get_clientdata(dev_id);
+ int err;
+ unsigned char pending;
+
+ err = pcf8563_get_alarm_mode(pcf8563->client, NULL, &pending);
+ if (err < 0)
+ return IRQ_NONE;
+
+ if (pending) {
+ rtc_update_irq(pcf8563->rtc, 1, RTC_IRQF | RTC_AF);
+ pcf8563_set_alarm_mode(pcf8563->client, 1);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * In the routines that deal directly with the pcf8563 hardware, we use
+ * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
+ */
+static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+{
+ struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
+ unsigned char buf[9];
+ int err;
+
+ err = pcf8563_read_block_data(client, PCF8563_REG_ST1, 9, buf);
+ if (err)
+ return err;
+
if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
pcf8563->voltage_low = 1;
dev_info(&client->dev,
@@ -144,7 +239,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
{
struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
- int i, err;
+ int err;
unsigned char buf[9];
dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
@@ -170,19 +265,10 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
buf[PCF8563_REG_DW] = tm->tm_wday & 0x07;
- /* write register's data */
- for (i = 0; i < 7; i++) {
- unsigned char data[2] = { PCF8563_REG_SC + i,
- buf[PCF8563_REG_SC + i] };
-
- err = i2c_master_send(client, data, sizeof(data));
- if (err != sizeof(data)) {
- dev_err(&client->dev,
- "%s: err=%d addr=%02x, data=%02x\n",
- __func__, err, data[0], data[1]);
- return -EIO;
- }
- }
+ err = pcf8563_write_block_data(client, PCF8563_REG_SC,
+ 9 - PCF8563_REG_SC, buf + PCF8563_REG_SC);
+ if (err)
+ return err;
return 0;
}
@@ -235,16 +321,83 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
return pcf8563_set_datetime(to_i2c_client(dev), tm);
}
+static int pcf8563_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[4];
+ int err;
+
+ err = pcf8563_read_block_data(client, PCF8563_REG_AMN, 4, buf);
+ if (err)
+ return err;
+
+ dev_dbg(&client->dev,
+ "%s: raw data is min=%02x, hr=%02x, mday=%02x, wday=%02x\n",
+ __func__, buf[0], buf[1], buf[2], buf[3]);
+
+ tm->time.tm_min = bcd2bin(buf[0] & 0x7F);
+ tm->time.tm_hour = bcd2bin(buf[1] & 0x7F);
+ tm->time.tm_mday = bcd2bin(buf[2] & 0x1F);
+ tm->time.tm_wday = bcd2bin(buf[3] & 0x7);
+ tm->time.tm_mon = -1;
+ tm->time.tm_year = -1;
+ tm->time.tm_yday = -1;
+ tm->time.tm_isdst = -1;
+
+ err = pcf8563_get_alarm_mode(client, &tm->enabled, &tm->pending);
+ if (err < 0)
+ return err;
+
+ dev_dbg(&client->dev, "%s: tm is mins=%d, hours=%d, mday=%d, wday=%d,"
+ " enabled=%d, pending=%d\n", __func__, tm->time.tm_min,
+ tm->time.tm_hour, tm->time.tm_mday, tm->time.tm_wday,
+ tm->enabled, tm->pending);
+
+ return 0;
+}
+
+static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[4];
+ int err;
+
+ dev_dbg(dev, "%s, min=%d hour=%d wday=%d mday=%d "
+ "enabled=%d pending=%d\n", __func__,
+ tm->time.tm_min, tm->time.tm_hour, tm->time.tm_wday,
+ tm->time.tm_mday, tm->enabled, tm->pending);
+
+ buf[0] = bin2bcd(tm->time.tm_min);
+ buf[1] = bin2bcd(tm->time.tm_hour);
+ buf[2] = bin2bcd(tm->time.tm_mday);
+ buf[3] = tm->time.tm_wday & 0x07;
+
+ err = pcf8563_write_block_data(client, PCF8563_REG_AMN, 4, buf);
+ if (err)
+ return err;
+
+ return pcf8563_set_alarm_mode(client, 1);
+}
+
+static int pcf8563_irq_enable(struct device *dev, unsigned int enabled)
+{
+ return pcf8563_set_alarm_mode(to_i2c_client(dev), !!enabled);
+}
+
static const struct rtc_class_ops pcf8563_rtc_ops = {
.ioctl = pcf8563_rtc_ioctl,
.read_time = pcf8563_rtc_read_time,
.set_time = pcf8563_rtc_set_time,
+ .read_alarm = pcf8563_rtc_read_alarm,
+ .set_alarm = pcf8563_rtc_set_alarm,
+ .alarm_irq_enable = pcf8563_irq_enable,
};
static int pcf8563_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pcf8563 *pcf8563;
+ int err;
dev_dbg(&client->dev, "%s\n", __func__);
@@ -259,12 +412,30 @@ static int pcf8563_probe(struct i2c_client *client,
dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
i2c_set_clientdata(client, pcf8563);
+ pcf8563->client = client;
+ device_set_wakeup_capable(&client->dev, 1);
pcf8563->rtc = devm_rtc_device_register(&client->dev,
pcf8563_driver.driver.name,
&pcf8563_rtc_ops, THIS_MODULE);
- return PTR_ERR_OR_ZERO(pcf8563->rtc);
+ if (IS_ERR(pcf8563->rtc))
+ return PTR_ERR(pcf8563->rtc);
+
+ if (client->irq > 0) {
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, pcf8563_irq,
+ IRQF_SHARED|IRQF_ONESHOT|IRQF_TRIGGER_FALLING,
+ pcf8563->rtc->name, client);
+ if (err) {
+ dev_err(&client->dev, "unable to request IRQ %d\n",
+ client->irq);
+ return err;
+ }
+
+ }
+
+ return 0;
}
static const struct i2c_device_id pcf8563_id[] = {
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 7af00208d637..2583349fbde5 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -258,6 +258,8 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ platform_set_drvdata(pdev, tps_rtc);
+
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_warn(&pdev->dev, "Wake up is not possible as irq = %d\n",
@@ -283,8 +285,6 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, tps_rtc);
-
return 0;
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1eef0f586950..5df05f26b7d9 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -42,8 +42,10 @@
* SECTION: exported variables of dasd.c
*/
debug_info_t *dasd_debug_area;
+EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
+EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
@@ -164,6 +166,7 @@ struct dasd_block *dasd_alloc_block(void)
return block;
}
+EXPORT_SYMBOL_GPL(dasd_alloc_block);
/*
* Free memory of a device structure.
@@ -172,6 +175,7 @@ void dasd_free_block(struct dasd_block *block)
{
kfree(block);
}
+EXPORT_SYMBOL_GPL(dasd_free_block);
/*
* Make a new device known to the system.
@@ -281,10 +285,15 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
{
int rc;
+ if (device->discipline->basic_to_known) {
+ rc = device->discipline->basic_to_known(device);
+ if (rc)
+ return rc;
+ }
+
if (device->block) {
dasd_profile_exit(&device->block->profile);
- if (device->block->debugfs_dentry)
- debugfs_remove(device->block->debugfs_dentry);
+ debugfs_remove(device->block->debugfs_dentry);
dasd_gendisk_free(device->block);
dasd_block_clear_timer(device->block);
}
@@ -293,9 +302,7 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
return rc;
dasd_device_clear_timer(device);
dasd_profile_exit(&device->profile);
- if (device->debugfs_dentry)
- debugfs_remove(device->debugfs_dentry);
-
+ debugfs_remove(device->debugfs_dentry);
DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
if (device->debug_area != NULL) {
debug_unregister(device->debug_area);
@@ -374,11 +381,6 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
{
int rc;
- if (device->discipline->ready_to_basic) {
- rc = device->discipline->ready_to_basic(device);
- if (rc)
- return rc;
- }
device->state = DASD_STATE_BASIC;
if (device->block) {
struct dasd_block *block = device->block;
@@ -579,6 +581,7 @@ void dasd_kick_device(struct dasd_device *device)
/* queue call to dasd_kick_device to the kernel event daemon. */
schedule_work(&device->kick_work);
}
+EXPORT_SYMBOL(dasd_kick_device);
/*
* dasd_reload_device will schedule a call do do_reload_device to the kernel
@@ -639,6 +642,7 @@ void dasd_set_target_state(struct dasd_device *device, int target)
mutex_unlock(&device->state_mutex);
dasd_put_device(device);
}
+EXPORT_SYMBOL(dasd_set_target_state);
/*
* Enable devices with device numbers in [from..to].
@@ -661,6 +665,7 @@ void dasd_enable_device(struct dasd_device *device)
if (device->discipline->kick_validate)
device->discipline->kick_validate(device);
}
+EXPORT_SYMBOL(dasd_enable_device);
/*
* SECTION: device operation (interrupt handler, start i/o, term i/o ...)
@@ -972,37 +977,37 @@ static void dasd_stats_seq_print(struct seq_file *m,
seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
- seq_printf(m, "histogram_sectors ");
+ seq_puts(m, "histogram_sectors ");
dasd_stats_array(m, data->dasd_io_secs);
- seq_printf(m, "histogram_io_times ");
+ seq_puts(m, "histogram_io_times ");
dasd_stats_array(m, data->dasd_io_times);
- seq_printf(m, "histogram_io_times_weighted ");
+ seq_puts(m, "histogram_io_times_weighted ");
dasd_stats_array(m, data->dasd_io_timps);
- seq_printf(m, "histogram_time_build_to_ssch ");
+ seq_puts(m, "histogram_time_build_to_ssch ");
dasd_stats_array(m, data->dasd_io_time1);
- seq_printf(m, "histogram_time_ssch_to_irq ");
+ seq_puts(m, "histogram_time_ssch_to_irq ");
dasd_stats_array(m, data->dasd_io_time2);
- seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
+ seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
dasd_stats_array(m, data->dasd_io_time2ps);
- seq_printf(m, "histogram_time_irq_to_end ");
+ seq_puts(m, "histogram_time_irq_to_end ");
dasd_stats_array(m, data->dasd_io_time3);
- seq_printf(m, "histogram_ccw_queue_length ");
+ seq_puts(m, "histogram_ccw_queue_length ");
dasd_stats_array(m, data->dasd_io_nr_req);
seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
- seq_printf(m, "histogram_read_sectors ");
+ seq_puts(m, "histogram_read_sectors ");
dasd_stats_array(m, data->dasd_read_secs);
- seq_printf(m, "histogram_read_times ");
+ seq_puts(m, "histogram_read_times ");
dasd_stats_array(m, data->dasd_read_times);
- seq_printf(m, "histogram_read_time_build_to_ssch ");
+ seq_puts(m, "histogram_read_time_build_to_ssch ");
dasd_stats_array(m, data->dasd_read_time1);
- seq_printf(m, "histogram_read_time_ssch_to_irq ");
+ seq_puts(m, "histogram_read_time_ssch_to_irq ");
dasd_stats_array(m, data->dasd_read_time2);
- seq_printf(m, "histogram_read_time_irq_to_end ");
+ seq_puts(m, "histogram_read_time_irq_to_end ");
dasd_stats_array(m, data->dasd_read_time3);
- seq_printf(m, "histogram_read_ccw_queue_length ");
+ seq_puts(m, "histogram_read_ccw_queue_length ");
dasd_stats_array(m, data->dasd_read_nr_req);
}
@@ -1016,7 +1021,7 @@ static int dasd_stats_show(struct seq_file *m, void *v)
data = profile->data;
if (!data) {
spin_unlock_bh(&profile->lock);
- seq_printf(m, "disabled\n");
+ seq_puts(m, "disabled\n");
return 0;
}
dasd_stats_seq_print(m, data);
@@ -1069,7 +1074,7 @@ static ssize_t dasd_stats_global_write(struct file *file,
static int dasd_stats_global_show(struct seq_file *m, void *v)
{
if (!dasd_global_profile_level) {
- seq_printf(m, "disabled\n");
+ seq_puts(m, "disabled\n");
return 0;
}
dasd_stats_seq_print(m, &dasd_global_profile_data);
@@ -1111,23 +1116,17 @@ static void dasd_profile_init(struct dasd_profile *profile,
static void dasd_profile_exit(struct dasd_profile *profile)
{
dasd_profile_off(profile);
- if (profile->dentry) {
- debugfs_remove(profile->dentry);
- profile->dentry = NULL;
- }
+ debugfs_remove(profile->dentry);
+ profile->dentry = NULL;
}
static void dasd_statistics_removeroot(void)
{
dasd_global_profile_level = DASD_PROFILE_OFF;
- if (dasd_global_profile_dentry) {
- debugfs_remove(dasd_global_profile_dentry);
- dasd_global_profile_dentry = NULL;
- }
- if (dasd_debugfs_global_entry)
- debugfs_remove(dasd_debugfs_global_entry);
- if (dasd_debugfs_root_entry)
- debugfs_remove(dasd_debugfs_root_entry);
+ debugfs_remove(dasd_global_profile_dentry);
+ dasd_global_profile_dentry = NULL;
+ debugfs_remove(dasd_debugfs_global_entry);
+ debugfs_remove(dasd_debugfs_root_entry);
}
static void dasd_statistics_createroot(void)
@@ -1178,7 +1177,7 @@ static void dasd_statistics_removeroot(void)
int dasd_stats_generic_show(struct seq_file *m, void *v)
{
- seq_printf(m, "Statistics are not activated in this kernel\n");
+ seq_puts(m, "Statistics are not activated in this kernel\n");
return 0;
}
@@ -1243,6 +1242,7 @@ struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
dasd_get_device(device);
return cqr;
}
+EXPORT_SYMBOL(dasd_kmalloc_request);
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
int datasize,
@@ -1282,6 +1282,7 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
dasd_get_device(device);
return cqr;
}
+EXPORT_SYMBOL(dasd_smalloc_request);
/*
* Free memory of a channel program. This function needs to free all the
@@ -1304,6 +1305,7 @@ void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
kfree(cqr);
dasd_put_device(device);
}
+EXPORT_SYMBOL(dasd_kfree_request);
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
@@ -1314,6 +1316,7 @@ void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
spin_unlock_irqrestore(&device->mem_lock, flags);
dasd_put_device(device);
}
+EXPORT_SYMBOL(dasd_sfree_request);
/*
* Check discipline magic in cqr.
@@ -1391,6 +1394,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
dasd_schedule_device_bh(device);
return rc;
}
+EXPORT_SYMBOL(dasd_term_IO);
/*
* Start the i/o. This start_IO can fail if the channel is really busy.
@@ -1509,6 +1513,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
cqr->intrc = rc;
return rc;
}
+EXPORT_SYMBOL(dasd_start_IO);
/*
* Timeout function for dasd devices. This is used for different purposes
@@ -1541,6 +1546,7 @@ void dasd_device_set_timer(struct dasd_device *device, int expires)
else
mod_timer(&device->timer, jiffies + expires);
}
+EXPORT_SYMBOL(dasd_device_set_timer);
/*
* Clear timeout for a device.
@@ -1549,6 +1555,7 @@ void dasd_device_clear_timer(struct dasd_device *device)
{
del_timer(&device->timer);
}
+EXPORT_SYMBOL(dasd_device_clear_timer);
static void dasd_handle_killed_request(struct ccw_device *cdev,
unsigned long intparm)
@@ -1601,6 +1608,7 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
if (device->block)
dasd_schedule_block_bh(device->block);
}
+EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
/*
* Interrupt handler for "normal" ssch-io based dasd devices.
@@ -1667,8 +1675,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cqr->status == DASD_CQR_CLEAR_PENDING &&
scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
cqr->status = DASD_CQR_CLEARED;
+ if (cqr->callback_data == DASD_SLEEPON_START_TAG)
+ cqr->callback_data = DASD_SLEEPON_END_TAG;
dasd_device_clear_timer(device);
wake_up(&dasd_flush_wq);
+ wake_up(&generic_waitq);
dasd_schedule_device_bh(device);
return;
}
@@ -1722,6 +1733,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
}
+EXPORT_SYMBOL(dasd_int_handler);
enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
@@ -1995,6 +2007,7 @@ finished:
__dasd_device_process_final_queue(device, &flush_queue);
return rc;
}
+EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
/*
* Acquire the device lock and process queues for the device.
@@ -2034,6 +2047,7 @@ void dasd_schedule_device_bh(struct dasd_device *device)
dasd_get_device(device);
tasklet_hi_schedule(&device->tasklet);
}
+EXPORT_SYMBOL(dasd_schedule_device_bh);
void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
@@ -2066,6 +2080,7 @@ void dasd_add_request_head(struct dasd_ccw_req *cqr)
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
+EXPORT_SYMBOL(dasd_add_request_head);
/*
* Queue a request to the tail of the device ccw_queue.
@@ -2084,6 +2099,7 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
+EXPORT_SYMBOL(dasd_add_request_tail);
/*
* Wakeup helper for the 'sleep_on' functions.
@@ -2291,13 +2307,27 @@ retry:
rc = 0;
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
- if (__dasd_sleep_on_erp(cqr))
- rc = 1;
+ /*
+ * for alias devices simplify error recovery and
+ * return to upper layer
+ */
+ if (cqr->startdev != cqr->basedev &&
+ (cqr->status == DASD_CQR_TERMINATED ||
+ cqr->status == DASD_CQR_NEED_ERP))
+ return -EAGAIN;
+ else {
+ /* normal recovery for basedev IO */
+ if (__dasd_sleep_on_erp(cqr)) {
+ if (cqr->status != DASD_CQR_TERMINATED &&
+ cqr->status != DASD_CQR_NEED_ERP)
+ break;
+ rc = 1;
+ }
+ }
}
if (rc)
goto retry;
-
return 0;
}
@@ -2309,6 +2339,7 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
return _dasd_sleep_on(cqr, 0);
}
+EXPORT_SYMBOL(dasd_sleep_on);
/*
* Start requests from a ccw_queue and wait for their completion.
@@ -2327,6 +2358,7 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
return _dasd_sleep_on(cqr, 1);
}
+EXPORT_SYMBOL(dasd_sleep_on_interruptible);
/*
* Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
@@ -2401,6 +2433,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
return rc;
}
+EXPORT_SYMBOL(dasd_sleep_on_immediatly);
/*
* Cancels a request that was started with dasd_sleep_on_req.
@@ -2423,6 +2456,8 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
case DASD_CQR_QUEUED:
/* request was not started - just set to cleared */
cqr->status = DASD_CQR_CLEARED;
+ if (cqr->callback_data == DASD_SLEEPON_START_TAG)
+ cqr->callback_data = DASD_SLEEPON_END_TAG;
break;
case DASD_CQR_IN_IO:
/* request in IO - terminate IO and release again */
@@ -2442,6 +2477,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
dasd_schedule_device_bh(device);
return rc;
}
+EXPORT_SYMBOL(dasd_cancel_req);
/*
* SECTION: Operations of the dasd_block layer.
@@ -2475,6 +2511,7 @@ void dasd_block_set_timer(struct dasd_block *block, int expires)
else
mod_timer(&block->timer, jiffies + expires);
}
+EXPORT_SYMBOL(dasd_block_set_timer);
/*
* Clear timeout for a dasd_block.
@@ -2483,6 +2520,7 @@ void dasd_block_clear_timer(struct dasd_block *block)
{
del_timer(&block->timer);
}
+EXPORT_SYMBOL(dasd_block_clear_timer);
/*
* Process finished error recovery ccw.
@@ -2864,6 +2902,7 @@ void dasd_schedule_block_bh(struct dasd_block *block)
dasd_get_device(block->base);
tasklet_hi_schedule(&block->tasklet);
}
+EXPORT_SYMBOL(dasd_schedule_block_bh);
/*
@@ -3202,8 +3241,8 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
ret = ccw_device_set_online(cdev);
if (ret)
- pr_warning("%s: Setting the DASD online failed with rc=%d\n",
- dev_name(&cdev->dev), ret);
+ pr_warn("%s: Setting the DASD online failed with rc=%d\n",
+ dev_name(&cdev->dev), ret);
}
/*
@@ -3234,6 +3273,7 @@ int dasd_generic_probe(struct ccw_device *cdev,
async_schedule(dasd_generic_auto_online, cdev);
return 0;
}
+EXPORT_SYMBOL_GPL(dasd_generic_probe);
/*
* This will one day be called from a global not_oper handler.
@@ -3276,6 +3316,7 @@ void dasd_generic_remove(struct ccw_device *cdev)
dasd_remove_sysfs_files(cdev);
}
+EXPORT_SYMBOL_GPL(dasd_generic_remove);
/*
* Activate a device. This is called from dasd_{eckd,fba}_probe() when either
@@ -3298,9 +3339,8 @@ int dasd_generic_set_online(struct ccw_device *cdev,
discipline = base_discipline;
if (device->features & DASD_FEATURE_USEDIAG) {
if (!dasd_diag_discipline_pointer) {
- pr_warning("%s Setting the DASD online failed because "
- "of missing DIAG discipline\n",
- dev_name(&cdev->dev));
+ pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
+ dev_name(&cdev->dev));
dasd_delete_device(device);
return -ENODEV;
}
@@ -3321,9 +3361,8 @@ int dasd_generic_set_online(struct ccw_device *cdev,
/* check_device will allocate block device if necessary */
rc = discipline->check_device(device);
if (rc) {
- pr_warning("%s Setting the DASD online with discipline %s "
- "failed with rc=%i\n",
- dev_name(&cdev->dev), discipline->name, rc);
+ pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
+ dev_name(&cdev->dev), discipline->name, rc);
module_put(discipline->owner);
module_put(base_discipline->owner);
dasd_delete_device(device);
@@ -3332,8 +3371,8 @@ int dasd_generic_set_online(struct ccw_device *cdev,
dasd_set_target_state(device, DASD_STATE_ONLINE);
if (device->state <= DASD_STATE_KNOWN) {
- pr_warning("%s Setting the DASD online failed because of a "
- "missing discipline\n", dev_name(&cdev->dev));
+ pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
+ dev_name(&cdev->dev));
rc = -ENODEV;
dasd_set_target_state(device, DASD_STATE_NEW);
if (device->block)
@@ -3348,6 +3387,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
dasd_put_device(device);
return rc;
}
+EXPORT_SYMBOL_GPL(dasd_generic_set_online);
int dasd_generic_set_offline(struct ccw_device *cdev)
{
@@ -3371,13 +3411,11 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
- pr_warning("%s: The DASD cannot be set offline "
- "with open count %i\n",
- dev_name(&cdev->dev), open_count);
+ pr_warn("%s: The DASD cannot be set offline with open count %i\n",
+ dev_name(&cdev->dev), open_count);
else
- pr_warning("%s: The DASD cannot be set offline "
- "while it is in use\n",
- dev_name(&cdev->dev));
+ pr_warn("%s: The DASD cannot be set offline while it is in use\n",
+ dev_name(&cdev->dev));
clear_bit(DASD_FLAG_OFFLINE, &device->flags);
dasd_put_device(device);
return -EBUSY;
@@ -3451,6 +3489,7 @@ interrupted:
dasd_put_device(device);
return rc;
}
+EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
int dasd_generic_last_path_gone(struct dasd_device *device)
{
@@ -3492,6 +3531,10 @@ int dasd_generic_path_operational(struct dasd_device *device)
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
+
+ if (!device->stopped)
+ wake_up(&generic_waitq);
+
return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
@@ -3523,6 +3566,7 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
dasd_put_device(device);
return ret;
}
+EXPORT_SYMBOL_GPL(dasd_generic_notify);
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
@@ -3872,39 +3916,3 @@ failed:
module_init(dasd_init);
module_exit(dasd_exit);
-
-EXPORT_SYMBOL(dasd_debug_area);
-EXPORT_SYMBOL(dasd_diag_discipline_pointer);
-
-EXPORT_SYMBOL(dasd_add_request_head);
-EXPORT_SYMBOL(dasd_add_request_tail);
-EXPORT_SYMBOL(dasd_cancel_req);
-EXPORT_SYMBOL(dasd_device_clear_timer);
-EXPORT_SYMBOL(dasd_block_clear_timer);
-EXPORT_SYMBOL(dasd_enable_device);
-EXPORT_SYMBOL(dasd_int_handler);
-EXPORT_SYMBOL(dasd_kfree_request);
-EXPORT_SYMBOL(dasd_kick_device);
-EXPORT_SYMBOL(dasd_kmalloc_request);
-EXPORT_SYMBOL(dasd_schedule_device_bh);
-EXPORT_SYMBOL(dasd_schedule_block_bh);
-EXPORT_SYMBOL(dasd_set_target_state);
-EXPORT_SYMBOL(dasd_device_set_timer);
-EXPORT_SYMBOL(dasd_block_set_timer);
-EXPORT_SYMBOL(dasd_sfree_request);
-EXPORT_SYMBOL(dasd_sleep_on);
-EXPORT_SYMBOL(dasd_sleep_on_immediatly);
-EXPORT_SYMBOL(dasd_sleep_on_interruptible);
-EXPORT_SYMBOL(dasd_smalloc_request);
-EXPORT_SYMBOL(dasd_start_IO);
-EXPORT_SYMBOL(dasd_term_IO);
-
-EXPORT_SYMBOL_GPL(dasd_generic_probe);
-EXPORT_SYMBOL_GPL(dasd_generic_remove);
-EXPORT_SYMBOL_GPL(dasd_generic_notify);
-EXPORT_SYMBOL_GPL(dasd_generic_set_online);
-EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
-EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
-EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
-EXPORT_SYMBOL_GPL(dasd_alloc_block);
-EXPORT_SYMBOL_GPL(dasd_free_block);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 2e8e0755070b..51dea7baf02c 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2039,7 +2039,7 @@ static int dasd_eckd_online_to_ready(struct dasd_device *device)
return 0;
};
-static int dasd_eckd_ready_to_basic(struct dasd_device *device)
+static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
return dasd_alias_remove_device(device);
};
@@ -2061,11 +2061,12 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base,
- struct format_data_t *fdata)
+ struct format_data_t *fdata,
+ int enable_pav)
{
struct dasd_eckd_private *base_priv;
struct dasd_eckd_private *start_priv;
- struct dasd_device *startdev;
+ struct dasd_device *startdev = NULL;
struct dasd_ccw_req *fcp;
struct eckd_count *ect;
struct ch_t address;
@@ -2079,7 +2080,9 @@ dasd_eckd_build_format(struct dasd_device *base,
int nr_tracks;
int use_prefix;
- startdev = dasd_alias_get_start_dev(base);
+ if (enable_pav)
+ startdev = dasd_alias_get_start_dev(base);
+
if (!startdev)
startdev = base;
@@ -2309,6 +2312,7 @@ dasd_eckd_build_format(struct dasd_device *base,
fcp->startdev = startdev;
fcp->memdev = startdev;
+ fcp->basedev = base;
fcp->retries = 256;
fcp->expires = startdev->default_expires * HZ;
fcp->buildclk = get_tod_clock();
@@ -2319,7 +2323,8 @@ dasd_eckd_build_format(struct dasd_device *base,
static int
dasd_eckd_format_device(struct dasd_device *base,
- struct format_data_t *fdata)
+ struct format_data_t *fdata,
+ int enable_pav)
{
struct dasd_ccw_req *cqr, *n;
struct dasd_block *block;
@@ -2327,7 +2332,7 @@ dasd_eckd_format_device(struct dasd_device *base,
struct list_head format_queue;
struct dasd_device *device;
int old_stop, format_step;
- int step, rc = 0;
+ int step, rc = 0, sleep_rc;
block = base->block;
private = (struct dasd_eckd_private *) base->private;
@@ -2361,11 +2366,11 @@ dasd_eckd_format_device(struct dasd_device *base,
}
INIT_LIST_HEAD(&format_queue);
- old_stop = fdata->stop_unit;
+ old_stop = fdata->stop_unit;
while (fdata->start_unit <= 1) {
fdata->stop_unit = fdata->start_unit;
- cqr = dasd_eckd_build_format(base, fdata);
+ cqr = dasd_eckd_build_format(base, fdata, enable_pav);
list_add(&cqr->blocklist, &format_queue);
fdata->stop_unit = old_stop;
@@ -2383,7 +2388,7 @@ retry:
if (step > format_step)
fdata->stop_unit = fdata->start_unit + format_step - 1;
- cqr = dasd_eckd_build_format(base, fdata);
+ cqr = dasd_eckd_build_format(base, fdata, enable_pav);
if (IS_ERR(cqr)) {
if (PTR_ERR(cqr) == -ENOMEM) {
/*
@@ -2403,7 +2408,7 @@ retry:
}
sleep:
- dasd_sleep_on_queue(&format_queue);
+ sleep_rc = dasd_sleep_on_queue(&format_queue);
list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
device = cqr->startdev;
@@ -2415,6 +2420,9 @@ sleep:
private->count--;
}
+ if (sleep_rc)
+ return sleep_rc;
+
/*
* in case of ENOMEM we need to retry after
* first requests are finished
@@ -4511,7 +4519,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.verify_path = dasd_eckd_verify_path,
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
- .ready_to_basic = dasd_eckd_ready_to_basic,
+ .basic_to_known = dasd_eckd_basic_to_known,
.fill_geometry = dasd_eckd_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 690001af0d09..c20170166909 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -175,6 +175,7 @@ struct dasd_ccw_req {
struct dasd_block *block; /* the originating block device */
struct dasd_device *memdev; /* the device used to allocate this */
struct dasd_device *startdev; /* device the request is started on */
+ struct dasd_device *basedev; /* base device if no block->base */
void *cpaddr; /* address of ccw or tcw */
unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */
char status; /* status of this request */
@@ -304,7 +305,7 @@ struct dasd_discipline {
*/
int (*basic_to_ready) (struct dasd_device *);
int (*online_to_ready) (struct dasd_device *);
- int (*ready_to_basic) (struct dasd_device *);
+ int (*basic_to_known)(struct dasd_device *);
/*
* Device operation functions. build_cp creates a ccw chain for
@@ -321,7 +322,7 @@ struct dasd_discipline {
int (*term_IO) (struct dasd_ccw_req *);
void (*handle_terminated_request) (struct dasd_ccw_req *);
int (*format_device) (struct dasd_device *,
- struct format_data_t *);
+ struct format_data_t *, int enable_pav);
int (*free_cp) (struct dasd_ccw_req *, struct request *);
/*
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 25a0f2f8b0b9..02837d0ad942 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -203,7 +203,9 @@ static int
dasd_format(struct dasd_block *block, struct format_data_t *fdata)
{
struct dasd_device *base;
- int rc;
+ int enable_pav = 1;
+ int rc, retries;
+ int start, stop;
base = block->base;
if (base->discipline->format_device == NULL)
@@ -231,11 +233,30 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata)
bdput(bdev);
}
- rc = base->discipline->format_device(base, fdata);
- if (rc)
- return rc;
-
- return 0;
+ retries = 255;
+ /* backup start- and endtrack for retries */
+ start = fdata->start_unit;
+ stop = fdata->stop_unit;
+ do {
+ rc = base->discipline->format_device(base, fdata, enable_pav);
+ if (rc) {
+ if (rc == -EAGAIN) {
+ retries--;
+ /* disable PAV in case of errors */
+ enable_pav = 0;
+ fdata->start_unit = start;
+ fdata->stop_unit = stop;
+ } else
+ return rc;
+ } else
+ /* success */
+ break;
+ } while (retries);
+
+ if (!retries)
+ return -EIO;
+ else
+ return 0;
}
/*
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 5af7f0bd6125..a6d47e5eee9e 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -288,12 +288,16 @@ static void raw3215_timeout(unsigned long __data)
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
- if (raw->flags & RAW3215_TIMER_RUNS) {
- del_timer(&raw->timer);
- raw->flags &= ~RAW3215_TIMER_RUNS;
- if (!(raw->port.flags & ASYNC_SUSPENDED)) {
- raw3215_mk_write_req(raw);
- raw3215_start_io(raw);
+ raw->flags &= ~RAW3215_TIMER_RUNS;
+ if (!(raw->port.flags & ASYNC_SUSPENDED)) {
+ raw3215_mk_write_req(raw);
+ raw3215_start_io(raw);
+ if ((raw->queued_read || raw->queued_write) &&
+ !(raw->flags & RAW3215_WORKING) &&
+ !(raw->flags & RAW3215_TIMER_RUNS)) {
+ raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+ add_timer(&raw->timer);
+ raw->flags |= RAW3215_TIMER_RUNS;
}
}
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -317,17 +321,15 @@ static inline void raw3215_try_io(struct raw3215_info *raw)
(raw->flags & RAW3215_FLUSHING)) {
/* execute write requests bigger than minimum size */
raw3215_start_io(raw);
- if (raw->flags & RAW3215_TIMER_RUNS) {
- del_timer(&raw->timer);
- raw->flags &= ~RAW3215_TIMER_RUNS;
- }
- } else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
- /* delay small writes */
- raw->timer.expires = RAW3215_TIMEOUT + jiffies;
- add_timer(&raw->timer);
- raw->flags |= RAW3215_TIMER_RUNS;
}
}
+ if ((raw->queued_read || raw->queued_write) &&
+ !(raw->flags & RAW3215_WORKING) &&
+ !(raw->flags & RAW3215_TIMER_RUNS)) {
+ raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+ add_timer(&raw->timer);
+ raw->flags |= RAW3215_TIMER_RUNS;
+ }
}
/*
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index f5f4a91fab44..f76bff68d1de 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -17,6 +17,8 @@
#include "qdio.h"
#include "qdio_debug.h"
+#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
+
static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;
@@ -32,6 +34,57 @@ void qdio_release_aob(struct qaob *aob)
}
EXPORT_SYMBOL_GPL(qdio_release_aob);
+/**
+ * qdio_free_buffers() - free qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers to free
+ */
+void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+ int pos;
+
+ for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
+ free_page((unsigned long) buf[pos]);
+}
+EXPORT_SYMBOL_GPL(qdio_free_buffers);
+
+/**
+ * qdio_alloc_buffers() - allocate qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers to allocate
+ */
+int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+ int pos;
+
+ for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
+ buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!buf[pos]) {
+ qdio_free_buffers(buf, count);
+ return -ENOMEM;
+ }
+ }
+ for (pos = 0; pos < count; pos++)
+ if (pos % QBUFF_PER_PAGE)
+ buf[pos] = buf[pos - 1] + 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qdio_alloc_buffers);
+
+/**
+ * qdio_reset_buffers() - reset qdio buffers
+ * @buf: array of pointers to qdio buffers
+ * @count: number of qdio buffers that will be zeroed
+ */
+void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
+{
+ int pos;
+
+ for (pos = 0; pos < count; pos++)
+ memset(buf[pos], 0, sizeof(struct qdio_buffer));
+}
+EXPORT_SYMBOL_GPL(qdio_reset_buffers);
+
/*
* qebsm is only available under 64bit but the adapter sets the feature
* flag anyway, so we manually override it.
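
The qdio_setup.c hunk above adds generic qdio buffer helpers: qdio_alloc_buffers() allocates one zeroed page per QBUFF_PER_PAGE entries and derives the remaining buffer pointers by arithmetic into the same page, while qdio_reset_buffers() and qdio_free_buffers() zero and release them. A rough driver-side usage sketch (mirroring the qeth_alloc_qdio_queue()/qeth_free_qdio_queue() conversion further down) is shown below; struct my_queue and the function names are invented for illustration, error handling is trimmed.

    /* Sketch of driver-side usage of the new helpers; QDIO_MAX_BUFFERS_PER_Q
     * and struct qdio_buffer come from <asm/qdio.h>, kzalloc/kfree from
     * <linux/slab.h>. */
    struct my_queue {
            struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
    };

    static struct my_queue *my_queue_alloc(void)
    {
            struct my_queue *q = kzalloc(sizeof(*q), GFP_KERNEL);

            if (!q)
                    return NULL;
            /* allocates the backing pages and fills all 128 pointers */
            if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
                    kfree(q);
                    return NULL;
            }
            return q;
    }

    static void my_queue_reuse(struct my_queue *q)
    {
            /* zero all SBALs before handing the queue back to the hardware */
            qdio_reset_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
    }

    static void my_queue_free(struct my_queue *q)
    {
            if (!q)
                    return;
            qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
            kfree(q);
    }
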
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index bbafbd0e017a..97ef37b51068 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -439,10 +439,10 @@ struct qeth_qdio_buffer {
};
struct qeth_qdio_q {
- struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
int next_buf_to_init;
-} __attribute__ ((aligned(256)));
+};
struct qeth_qdio_out_buffer {
struct qdio_buffer *buffer;
@@ -465,7 +465,7 @@ enum qeth_out_q_states {
};
struct qeth_qdio_out_q {
- struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
int queue_no;
@@ -483,7 +483,7 @@ struct qeth_qdio_out_q {
atomic_t used_buffers;
/* indicates whether PCI flag must be set (or if one is outstanding) */
atomic_t set_pci_flags_count;
-} __attribute__ ((aligned(256)));
+};
struct qeth_qdio_info {
atomic_t state;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 71bfacfc097e..c0d6ba8655c7 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -292,14 +292,43 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
+{
+ if (!q)
+ return;
+
+ qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+ kfree(q);
+}
+
+static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
+{
+ struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
+ int i;
+
+ if (!q)
+ return NULL;
+
+ if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
+ kfree(q);
+ return NULL;
+ }
+
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
+ q->bufs[i].buffer = q->qdio_bufs[i];
+
+ QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
+ return q;
+}
+
static inline int qeth_cq_init(struct qeth_card *card)
{
int rc;
if (card->options.cq == QETH_CQ_ENABLED) {
QETH_DBF_TEXT(SETUP, 2, "cqinit");
- memset(card->qdio.c_q->qdio_bufs, 0,
- QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+ qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
+ QDIO_MAX_BUFFERS_PER_Q);
card->qdio.c_q->next_buf_to_init = 127;
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
card->qdio.no_in_queues - 1, 0,
@@ -323,21 +352,12 @@ static inline int qeth_alloc_cq(struct qeth_card *card)
struct qdio_outbuf_state *outbuf_states;
QETH_DBF_TEXT(SETUP, 2, "cqon");
- card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q),
- GFP_KERNEL);
+ card->qdio.c_q = qeth_alloc_qdio_queue();
if (!card->qdio.c_q) {
rc = -1;
goto kmsg_out;
}
- QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
-
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
- card->qdio.c_q->bufs[i].buffer =
- &card->qdio.c_q->qdio_bufs[i];
- }
-
card->qdio.no_in_queues = 2;
-
card->qdio.out_bufstates =
kzalloc(card->qdio.no_out_queues *
QDIO_MAX_BUFFERS_PER_Q *
@@ -361,7 +381,7 @@ static inline int qeth_alloc_cq(struct qeth_card *card)
out:
return rc;
free_cq_out:
- kfree(card->qdio.c_q);
+ qeth_free_qdio_queue(card->qdio.c_q);
card->qdio.c_q = NULL;
kmsg_out:
dev_err(&card->gdev->dev, "Failed to create completion queue\n");
@@ -372,7 +392,7 @@ static inline void qeth_free_cq(struct qeth_card *card)
{
if (card->qdio.c_q) {
--card->qdio.no_in_queues;
- kfree(card->qdio.c_q);
+ qeth_free_qdio_queue(card->qdio.c_q);
card->qdio.c_q = NULL;
}
kfree(card->qdio.out_bufstates);
@@ -1282,35 +1302,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
}
}
-static void qeth_free_qdio_buffers(struct qeth_card *card)
-{
- int i, j;
-
- if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
- QETH_QDIO_UNINITIALIZED)
- return;
-
- qeth_free_cq(card);
- cancel_delayed_work_sync(&card->buffer_reclaim_work);
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
- if (card->qdio.in_q->bufs[j].rx_skb)
- dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
- }
- kfree(card->qdio.in_q);
- card->qdio.in_q = NULL;
- /* inbound buffer pool */
- qeth_free_buffer_pool(card);
- /* free outbound qdio_qs */
- if (card->qdio.out_qs) {
- for (i = 0; i < card->qdio.no_out_queues; ++i) {
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
- kfree(card->qdio.out_qs[i]);
- }
- kfree(card->qdio.out_qs);
- card->qdio.out_qs = NULL;
- }
-}
-
static void qeth_clean_channel(struct qeth_channel *channel)
{
int cnt;
@@ -2392,7 +2383,7 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
rc = -ENOMEM;
goto out;
}
- newbuf->buffer = &q->qdio_bufs[bidx];
+ newbuf->buffer = q->qdio_bufs[bidx];
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
newbuf->q = q;
@@ -2411,6 +2402,28 @@ out:
return rc;
}
+static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+{
+ if (!q)
+ return;
+
+ qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+ kfree(q);
+}
+
+static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
+{
+ struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
+
+ if (!q)
+ return NULL;
+
+ if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
+ kfree(q);
+ return NULL;
+ }
+ return q;
+}
static int qeth_alloc_qdio_buffers(struct qeth_card *card)
{
@@ -2422,19 +2435,11 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
return 0;
- card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q),
- GFP_KERNEL);
+ QETH_DBF_TEXT(SETUP, 2, "inq");
+ card->qdio.in_q = qeth_alloc_qdio_queue();
if (!card->qdio.in_q)
goto out_nomem;
- QETH_DBF_TEXT(SETUP, 2, "inq");
- QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
- memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
- /* give inbound qeth_qdio_buffers their qdio_buffers */
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
- card->qdio.in_q->bufs[i].buffer =
- &card->qdio.in_q->qdio_bufs[i];
- card->qdio.in_q->bufs[i].rx_skb = NULL;
- }
+
/* inbound buffer pool */
if (qeth_alloc_buffer_pool(card))
goto out_freeinq;
@@ -2446,8 +2451,7 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
if (!card->qdio.out_qs)
goto out_freepool;
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q),
- GFP_KERNEL);
+ card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
if (!card->qdio.out_qs[i])
goto out_freeoutq;
QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
@@ -2476,7 +2480,7 @@ out_freeoutqbufs:
}
out_freeoutq:
while (i > 0) {
- kfree(card->qdio.out_qs[--i]);
+ qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
}
kfree(card->qdio.out_qs);
@@ -2484,13 +2488,42 @@ out_freeoutq:
out_freepool:
qeth_free_buffer_pool(card);
out_freeinq:
- kfree(card->qdio.in_q);
+ qeth_free_qdio_queue(card->qdio.in_q);
card->qdio.in_q = NULL;
out_nomem:
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
return -ENOMEM;
}
+static void qeth_free_qdio_buffers(struct qeth_card *card)
+{
+ int i, j;
+
+ if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
+ QETH_QDIO_UNINITIALIZED)
+ return;
+
+ qeth_free_cq(card);
+ cancel_delayed_work_sync(&card->buffer_reclaim_work);
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+ if (card->qdio.in_q->bufs[j].rx_skb)
+ dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
+ }
+ qeth_free_qdio_queue(card->qdio.in_q);
+ card->qdio.in_q = NULL;
+ /* inbound buffer pool */
+ qeth_free_buffer_pool(card);
+ /* free outbound qdio_qs */
+ if (card->qdio.out_qs) {
+ for (i = 0; i < card->qdio.no_out_queues; ++i) {
+ qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
+ qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
+ }
+ kfree(card->qdio.out_qs);
+ card->qdio.out_qs = NULL;
+ }
+}
+
static void qeth_create_qib_param_field(struct qeth_card *card,
char *param_field)
{
@@ -2788,8 +2821,8 @@ int qeth_init_qdio_queues(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "initqdqs");
/* inbound queue */
- memset(card->qdio.in_q->qdio_bufs, 0,
- QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+ qdio_reset_buffers(card->qdio.in_q->qdio_bufs,
+ QDIO_MAX_BUFFERS_PER_Q);
qeth_initialize_working_pool_list(card);
/*give only as many buffers to hardware as we have buffer pool entries*/
for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
@@ -2811,8 +2844,8 @@ int qeth_init_qdio_queues(struct qeth_card *card)
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- memset(card->qdio.out_qs[i]->qdio_bufs, 0,
- QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+ qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
+ QDIO_MAX_BUFFERS_PER_Q);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
qeth_clear_output_buffer(card->qdio.out_qs[i],
card->qdio.out_qs[i]->bufs[j],
@@ -3569,7 +3602,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
for (i = first_element; i < first_element + count; ++i) {
int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
- struct qdio_buffer *buffer = &cq->qdio_bufs[bidx];
+ struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
int e;
e = 0;
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0ca64484cfa3..5d7fbe4e907e 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -418,7 +418,8 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
- rec->scsi_lun = sc->device->lun;
+ /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
+ rec->scsi_lun = (u32)sc->device->lun;
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 06025cdaa4ad..495e1cb3afa6 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -14,27 +14,10 @@
#include "zfcp_ext.h"
#include "zfcp_qdio.h"
-#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
-
static bool enable_multibuffer = 1;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
-static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
-{
- int pos;
-
- for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
- sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
- if (!sbal[pos])
- return -ENOMEM;
- }
- for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
- if (pos % QBUFF_PER_PAGE)
- sbal[pos] = sbal[pos - 1] + 1;
- return 0;
-}
-
static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
unsigned int qdio_err)
{
@@ -326,15 +309,30 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
struct qdio_initialize init_data;
+ int ret;
- if (zfcp_qdio_buffers_enqueue(qdio->req_q) ||
- zfcp_qdio_buffers_enqueue(qdio->res_q))
+ ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
+ if (ret)
return -ENOMEM;
+ ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
+ if (ret)
+ goto free_req_q;
+
zfcp_qdio_setup_init_data(&init_data, qdio);
init_waitqueue_head(&qdio->req_q_wq);
- return qdio_allocate(&init_data);
+ ret = qdio_allocate(&init_data);
+ if (ret)
+ goto free_res_q;
+
+ return 0;
+
+free_res_q:
+ qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
+free_req_q:
+ qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
+ return ret;
}
/**
@@ -448,19 +446,14 @@ failed_establish:
void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
{
- int p;
-
if (!qdio)
return;
if (qdio->adapter->ccw_device)
qdio_free(qdio->adapter->ccw_device);
- for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
- free_page((unsigned long) qdio->req_q[p]);
- free_page((unsigned long) qdio->res_q[p]);
- }
-
+ qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
+ qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
kfree(qdio);
}
@@ -475,7 +468,7 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter)
qdio->adapter = adapter;
if (zfcp_qdio_allocate(qdio)) {
- zfcp_qdio_destroy(qdio);
+ kfree(qdio);
return -ENOMEM;
}
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 39f5446f7216..157d3d203ba1 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -21,7 +21,7 @@
void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
{
struct fc_rport *rport = unit->port->rport;
- unsigned int lun;
+ u64 lun;
lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
@@ -188,7 +188,7 @@ struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
- unsigned int lun;
+ u64 lun;
lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
port = unit->port;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 4de346017e9f..6da6cec9a651 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -683,14 +683,13 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+ cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
+ &dma_handle);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
}
- memset(cpu_addr, 0, size*TW_Q_LENGTH);
-
for (i = 0; i < TW_Q_LENGTH; i++) {
switch(which) {
case 0:
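Note: several allocations in this series (3w-sas above, a100u2w and be2iscsi below) drop the explicit memset() by switching to pci_zalloc_consistent(), which returns coherent DMA memory already cleared. A minimal sketch; the wrapper name is illustrative:

#include <linux/pci.h>

/* Before: buf = pci_alloc_consistent(pdev, size, &dma);
 *         if (buf)
 *                 memset(buf, 0, size);
 * After: one call, and the memory comes back zeroed.
 */
static void *alloc_cleared_dma(struct pci_dev *pdev, size_t size,
			       dma_addr_t *dma)
{
	return pci_zalloc_consistent(pdev, size, dma);
}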
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 49dcf03c631a..29b0b84ed69e 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -392,6 +392,8 @@ typedef struct TAG_TW_Passthru
unsigned char padding[12];
} TW_Passthru;
+#pragma pack()
+
typedef struct TAG_TW_Device_Extension {
u32 base_addr;
unsigned long *alignment_virtual_address[TW_Q_LENGTH];
@@ -430,6 +432,4 @@ typedef struct TAG_TW_Device_Extension {
wait_queue_head_t ioctl_wqueue;
} TW_Device_Extension;
-#pragma pack()
-
#endif /* _3W_XXXX_H */
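Note: the 3w-xxxx.h hunk closes the packed region right after the last firmware-defined structure instead of at the end of the header, so TW_Device_Extension (host-only bookkeeping full of pointers, locks and wait queues) returns to natural alignment. A small sketch of the idea, assuming the matching #pragma pack(1) is opened earlier in the header (not shown in this hunk); the struct names are illustrative:

#include <linux/types.h>
#include <linux/spinlock.h>

#pragma pack(1)
struct wire_command {		/* firmware-defined layout: must stay packed */
	u8  opcode;
	u16 request_id;
	u32 param;
};
#pragma pack()			/* restore default alignment here ... */

struct host_state {		/* ... so host-only state is naturally aligned */
	struct wire_command *pending;
	spinlock_t lock;
};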
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index a3adfb4357f5..fabd4be2c985 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1005,7 +1005,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
DMA_TO_DEVICE);
cmnd[0] = REQUEST_SENSE;
- cmnd[1] = (SCp->device->lun & 0x7) << 5;
+ cmnd[1] = (lun & 0x7) << 5;
cmnd[2] = 0;
cmnd[3] = 0;
cmnd[4] = SCSI_SENSE_BUFFERSIZE;
@@ -1396,7 +1396,8 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
struct NCR_700_Host_Parameters *hostdata =
(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
__u16 count = 1; /* for IDENTIFY message */
-
+ u8 lun = SCp->device->lun;
+
if(hostdata->state != NCR_700_HOST_FREE) {
/* keep this inside the lock to close the race window where
* the running command finishes on another CPU while we don't
@@ -1415,7 +1416,7 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
slot->flags != NCR_700_FLAG_AUTOSENSE),
- SCp->device->lun);
+ lun);
/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
* if the negotiated transfer parameters still hold, so
* always renegotiate them */
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index baca5897039f..18a3358eb1d4 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -40,13 +40,6 @@ config SCSI_DMA
bool
default n
-config SCSI_TGT
- tristate "SCSI target support"
- depends on SCSI
- ---help---
- If you want to use SCSI target mode drivers enable this option.
- If you choose M, the module will be called scsi_tgt.
-
config SCSI_NETLINK
bool
default n
@@ -197,20 +190,6 @@ config SCSI_ENCLOSURE
it has an enclosure device. Selecting this option will just allow
certain enclosure conditions to be reported and is not required.
-config SCSI_MULTI_LUN
- bool "Probe all LUNs on each SCSI device"
- depends on SCSI
- help
- Some devices support more than one LUN (Logical Unit Number) in order
- to allow access to several media, e.g. CD jukebox, USB card reader,
- mobile phone in mass storage mode. This option forces the kernel to
- probe for all LUNs by default. This setting can be overridden by
- max_luns boot/module parameter. Note that this option does not affect
- devices conforming to SCSI-3 or higher as they can explicitly report
- their number of LUNs. It is safe to say Y here unless you have one of
- those rare devices which reacts in an unexpected way when probed for
- multiple LUNs.
-
config SCSI_CONSTANTS
bool "Verbose SCSI error reporting (kernel size +=12K)"
depends on SCSI
@@ -285,13 +264,6 @@ config SCSI_FC_ATTRS
each attached FiberChannel device to sysfs, say Y.
Otherwise, say N.
-config SCSI_FC_TGT_ATTRS
- bool "SCSI target support for FiberChannel Transport Attributes"
- depends on SCSI_FC_ATTRS
- depends on SCSI_TGT = y || SCSI_TGT = SCSI_FC_ATTRS
- help
- If you want to use SCSI target mode drivers enable this option.
-
config SCSI_ISCSI_ATTRS
tristate "iSCSI Transport Attributes"
depends on SCSI && NET
@@ -318,13 +290,6 @@ config SCSI_SRP_ATTRS
If you wish to export transport-specific information about
each attached SRP device to sysfs, say Y.
-config SCSI_SRP_TGT_ATTRS
- bool "SCSI target support for SRP Transport Attributes"
- depends on SCSI_SRP_ATTRS
- depends on SCSI_TGT = y || SCSI_TGT = SCSI_SRP_ATTRS
- help
- If you want to use SCSI target mode drivers enable this option.
-
endmenu
menuconfig SCSI_LOWLEVEL
@@ -528,7 +493,7 @@ config SCSI_DPT_I2O
config SCSI_ADVANSYS
tristate "AdvanSys SCSI support"
- depends on SCSI && VIRT_TO_BUS
+ depends on SCSI && VIRT_TO_BUS && !ARM
depends on ISA || EISA || PCI
help
This is a driver for all SCSI host adapters manufactured by
@@ -848,20 +813,6 @@ config SCSI_IBMVSCSI
To compile this driver as a module, choose M here: the
module will be called ibmvscsi.
-config SCSI_IBMVSCSIS
- tristate "IBM Virtual SCSI Server support"
- depends on PPC_PSERIES && SCSI_SRP && SCSI_SRP_TGT_ATTRS
- help
- This is the SRP target driver for IBM pSeries virtual environments.
-
- The userspace component needed to initialize the driver and
- documentation can be found:
-
- http://stgt.berlios.de/
-
- To compile this driver as a module, choose M here: the
- module will be called ibmvstgt.
-
config SCSI_IBMVFC
tristate "IBM Virtual FC support"
depends on PPC_PSERIES && SCSI
@@ -1750,16 +1701,6 @@ config SCSI_PM8001
This driver supports PMC-Sierra PCIE SAS/SATA 8x6G SPC 8001 chip
based host adapters.
-config SCSI_SRP
- tristate "SCSI RDMA Protocol helper library"
- depends on SCSI && PCI
- select SCSI_TGT
- help
- If you wish to use SRP target drivers, say Y.
-
- To compile this driver as a module, choose M here: the
- module will be called libsrp.
-
config SCSI_BFA_FC
tristate "Brocade BFA Fibre Channel Support"
depends on PCI && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e172d4f8e02f..5f0d299b0093 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -20,7 +20,6 @@ CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
obj-$(CONFIG_PCMCIA) += pcmcia/
obj-$(CONFIG_SCSI) += scsi_mod.o
-obj-$(CONFIG_SCSI_TGT) += scsi_tgt.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
@@ -127,9 +126,7 @@ obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
obj-$(CONFIG_SCSI_SNI_53C710) += 53c700.o sni_53c710.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
obj-$(CONFIG_SCSI_IPR) += ipr.o
-obj-$(CONFIG_SCSI_SRP) += libsrp.o
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
-obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/
obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
@@ -173,8 +170,6 @@ scsi_mod-$(CONFIG_PM) += scsi_pm.o
hv_storvsc-y := storvsc_drv.o
-scsi_tgt-y += scsi_tgt_lib.o scsi_tgt_if.o
-
sd_mod-objs := sd.o
sd_mod-$(CONFIG_BLK_DEV_INTEGRITY) += sd_dif.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 93d13fc9a293..45da3c823322 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -762,7 +762,7 @@ static int __maybe_unused NCR5380_show_info(struct seq_file *m,
static void lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, struct seq_file *m)
{
- SPRINTF("scsi%d : destination target %d, lun %d\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
+ SPRINTF("scsi%d : destination target %d, lun %llu\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
SPRINTF(" command = ");
lprint_command(cmd->cmnd, m);
}
@@ -1039,9 +1039,10 @@ static void NCR5380_main(struct work_struct *work)
for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
{
if (prev != tmp)
- dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
+ dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
/* When we find one, remove it from the issue queue. */
- if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) {
+ if (!(hostdata->busy[tmp->device->id] &
+ (1 << (u8)(tmp->device->lun & 0xff)))) {
if (prev) {
REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble);
prev->host_scribble = tmp->host_scribble;
@@ -1057,7 +1058,7 @@ static void NCR5380_main(struct work_struct *work)
* On failure, we must add the command back to the
* issue queue so we can keep trying.
*/
- dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun);
+ dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %llu removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun);
/*
* A successful selection is defined as one that
@@ -1524,7 +1525,7 @@ part2:
dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no);
/* XXX need to handle errors here */
hostdata->connected = cmd;
- hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF));
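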
initialize_SCp(cmd);
@@ -2210,14 +2211,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
case LINKED_FLG_CMD_COMPLETE:
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun);
+ dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun);
/*
* Sanity check : A linked command should only terminate with
* one of these messages if there are more linked commands
* available.
*/
if (!cmd->next_link) {
- printk("scsi%d : target %d lun %d linked command complete, no next_link\n" instance->host_no, cmd->device->id, cmd->device->lun);
+ printk("scsi%d : target %d lun %llu linked command complete, no next_link\n" instance->host_no, cmd->device->id, cmd->device->lun);
sink = 1;
do_abort(instance);
return;
@@ -2226,7 +2227,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
/* The next command is still part of this process */
cmd->next_link->tag = cmd->tag;
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);
+ dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %llu linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);
collect_stats(hostdata, cmd);
cmd->scsi_done(cmd);
cmd = hostdata->connected;
@@ -2238,8 +2239,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
sink = 1;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
hostdata->connected = NULL;
- dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun);
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %llu completed\n", instance->host_no, cmd->device->id, cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF));
/*
* I'm not sure what the correct thing to do here is :
@@ -2304,7 +2305,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
case ORDERED_QUEUE_TAG:
case SIMPLE_QUEUE_TAG:
cmd->device->simple_tags = 0;
- hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF));
break;
default:
break;
@@ -2318,7 +2319,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
hostdata->disconnected_queue;
hostdata->connected = NULL;
hostdata->disconnected_queue = cmd;
- dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun);
+ dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %llu was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun);
/*
* Restore phase bits to 0 so an interrupted selection,
* arbitration can resume.
@@ -2426,7 +2427,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
hostdata->last_message = msgout;
NCR5380_transfer_pio(instance, &phase, &len, &data);
if (msgout == ABORT) {
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xFF));
hostdata->connected = NULL;
cmd->result = DID_ERROR << 16;
collect_stats(hostdata, cmd);
@@ -2562,7 +2563,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
- if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
+ if ((target_mask == (1 << tmp->device->id)) && (lun == (u8)tmp->device->lun)
) {
if (prev) {
REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble);
@@ -2588,7 +2589,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
do_abort(instance);
} else {
hostdata->connected = tmp;
- dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag);
+ dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %llu, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag);
}
}
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index c91888a0a23c..42c7161474f7 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -595,7 +595,7 @@ static int NCR53c406a_release(struct Scsi_Host *shost)
{
if (shost->irq)
free_irq(shost->irq, NULL);
-#ifdef USE_DMA
+#if USE_DMA
if (shost->dma_channel != 0xff)
free_dma(shost->dma_channel);
#endif
@@ -698,7 +698,7 @@ static int NCR53c406a_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
int i;
VDEB(printk("NCR53c406a_queue called\n"));
- DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, scsi_bufflen(SCpnt)));
+ DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->target, (u8)SCpnt->device->lun, scsi_bufflen(SCpnt)));
#if 0
VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 0163457c12bb..7e33a61c1ba4 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -891,7 +891,7 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
printk("max cdb length= %x\b", cmd->cmd_len);
scb->cdb_len = IMAX_CDB;
}
- scb->ident = cmd->device->lun | DISC_ALLOW;
+ scb->ident = (u8)(cmd->device->lun & 0xff) | DISC_ALLOW;
if (cmd->device->tagged_supported) { /* Tag Support */
scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
} else {
@@ -1125,23 +1125,19 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for SCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
- host->scb_virt = pci_alloc_consistent(pdev, sz,
- &host->scb_phys);
+ host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys);
if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
}
- memset(host->scb_virt, 0, sz);
/* Get total memory needed for ESCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
- host->escb_virt = pci_alloc_consistent(pdev, sz,
- &host->escb_phys);
+ host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys);
if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
}
- memset(host->escb_virt, 0, sz);
biosaddr = host->BIOScfg;
biosaddr = (biosaddr << 4);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 4921ed19a027..63f576c9300a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -551,7 +551,7 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
int count;
int ret = FAILED;
- printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%d)\n",
+ printk(KERN_ERR "%s: Host adapter abort request (%d,%d,%d,%llu)\n",
AAC_DRIVERNAME,
host->host_no, sdev_channel(dev), sdev_id(dev), dev->lun);
switch (cmd->cmnd[0]) {
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index d8145888e66a..43761c1c46f0 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2512,7 +2512,7 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
printk(" host_busy %u, host_no %d,\n",
- s->host_busy, s->host_no);
+ atomic_read(&s->host_busy), s->host_no);
printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
(ulong)s->base, (ulong)s->io_port, boardp->irq);
@@ -3345,8 +3345,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
shost->host_no);
seq_printf(m,
- " host_busy %u, max_id %u, max_lun %u, max_channel %u\n",
- shost->host_busy, shost->max_id,
+ " host_busy %u, max_id %u, max_lun %llu, max_channel %u\n",
+ atomic_read(&shost->host_busy), shost->max_id,
shost->max_lun, shost->max_channel);
seq_printf(m,
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index e86eb6a921fc..e77b72f78006 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -321,7 +321,7 @@ static LIST_HEAD(aha152x_host_list);
#define CMDINFO(cmd) \
(cmd) ? ((cmd)->device->host->host_no) : -1, \
(cmd) ? ((cmd)->device->id & 0x0f) : -1, \
- (cmd) ? ((cmd)->device->lun & 0x07) : -1
+ (cmd) ? ((u8)(cmd)->device->lun & 0x07) : -1
static inline void
CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
@@ -1602,7 +1602,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
#if defined(AHA152X_DEBUG)
int hostno=DONE_SC->device->host->host_no;
int id=DONE_SC->device->id & 0xf;
- int lun=DONE_SC->device->lun & 0x7;
+ int lun=((u8)DONE_SC->device->lun) & 0x7;
#endif
Scsi_Cmnd *ptr = DONE_SC;
DONE_SC=NULL;
@@ -2984,7 +2984,7 @@ static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
int i;
SPRINTF("%p: target=%d; lun=%d; cmnd=( ",
- ptr, ptr->device->id, ptr->device->lun);
+ ptr, ptr->device->id, (u8)ptr->device->lun);
for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++)
SPRINTF("0x%02x ", ptr->cmnd[i]);
diff --git a/drivers/scsi/aic7xxx/aic7770_osm.c b/drivers/scsi/aic7xxx/aic7770_osm.c
index 0cb8ef64b5ce..3d401d02c019 100644
--- a/drivers/scsi/aic7xxx/aic7770_osm.c
+++ b/drivers/scsi/aic7xxx/aic7770_osm.c
@@ -85,10 +85,9 @@ aic7770_probe(struct device *dev)
int error;
sprintf(buf, "ahc_eisa:%d", eisaBase >> 12);
- name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
+ name = kstrdup(buf, GFP_ATOMIC);
if (name == NULL)
return (ENOMEM);
- strcpy(name, buf);
ahc = ahc_alloc(&aic7xxx_driver_template, name);
if (ahc == NULL)
return (ENOMEM);
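Note: the aic7xxx probe paths here and below replace the open-coded kmalloc(strlen() + 1) plus strcpy() with kstrdup(). A one-line sketch; the wrapper name is illustrative:

#include <linux/slab.h>
#include <linux/string.h>

/* Before: name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
 *         if (name)
 *                 strcpy(name, buf);
 * After: kstrdup() sizes, allocates and copies in one call.
 */
static char *dup_adapter_name(const char *buf)
{
	return kstrdup(buf, GFP_ATOMIC);
}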
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 113874c1284b..df2e0e5367d2 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -115,7 +115,7 @@ struct scb_platform_data;
#endif
#define AHD_BUILD_COL_IDX(target, lun) \
- (((lun) << 4) | target)
+ ((((u8)lun) << 4) | target)
#define AHD_GET_SCB_COL_IDX(ahd, scb) \
((SCB_GET_LUN(scb) << 4) | SCB_GET_TARGET(ahd, scb))
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 69d5c43a65e5..ed333669a7dc 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -2137,7 +2137,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
if (do_fallback) {
printk("%s: device overrun (status %x) on %d:%d:%d\n",
ahd_name(ahd), status, cmd->device->channel,
- cmd->device->id, cmd->device->lun);
+ cmd->device->id, (u8)cmd->device->lun);
}
ahd_cmd_set_transaction_status(cmd, new_status);
@@ -2253,13 +2253,13 @@ ahd_linux_queue_abort_cmd(struct scsi_cmnd *cmd)
disconnected = TRUE;
if (ahd_search_qinfifo(ahd, cmd->device->id,
cmd->device->channel + 'A',
- cmd->device->lun,
+ cmd->device->lun,
pending_scb->hscb->tag,
ROLE_INITIATOR, CAM_REQ_ABORTED,
SEARCH_COMPLETE) > 0) {
printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
ahd_name(ahd), cmd->device->channel,
- cmd->device->id, cmd->device->lun);
+ cmd->device->id, (u8)cmd->device->lun);
retval = SUCCESS;
goto done;
}
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 3c85873b14b9..8466aa784ec1 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -178,10 +178,9 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahd_get_pci_bus(pci),
ahd_get_pci_slot(pci),
ahd_get_pci_function(pci));
- name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
+ name = kstrdup(buf, GFP_ATOMIC);
if (name == NULL)
return (-ENOMEM);
- strcpy(name, buf);
ahd = ahd_alloc(NULL, name);
if (ahd == NULL)
return (-ENOMEM);
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index e9778b4f7e32..27dbfccea774 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -197,7 +197,7 @@ ahd_dump_device_state(struct seq_file *m, struct scsi_device *sdev)
seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n",
sdev->sdev_target->channel + 'A',
- sdev->sdev_target->id, sdev->lun);
+ sdev->sdev_target->id, (u8)sdev->lun);
seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued);
seq_printf(m, "\t\tCommands Active %d\n", dev->active);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 114ff0c6e311..d2c9bf39033d 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -2110,7 +2110,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
*/
printk("%s:%d:%d:%d: Is not an active device\n",
ahc_name(ahc), cmd->device->channel, cmd->device->id,
- cmd->device->lun);
+ (u8)cmd->device->lun);
retval = SUCCESS;
goto no_cmd;
}
@@ -2118,11 +2118,11 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
&& ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
cmd->device->channel + 'A',
- cmd->device->lun,
+ (u8)cmd->device->lun,
CAM_REQ_ABORTED, SEARCH_COMPLETE) != 0) {
printk("%s:%d:%d:%d: Command found on untagged queue\n",
ahc_name(ahc), cmd->device->channel, cmd->device->id,
- cmd->device->lun);
+ (u8)cmd->device->lun);
retval = SUCCESS;
goto done;
}
@@ -2188,13 +2188,14 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
SEARCH_COMPLETE) > 0) {
printk("%s:%d:%d:%d: Cmd aborted from QINFIFO\n",
ahc_name(ahc), cmd->device->channel,
- cmd->device->id, cmd->device->lun);
+ cmd->device->id, (u8)cmd->device->lun);
retval = SUCCESS;
goto done;
}
} else if (ahc_search_qinfifo(ahc, cmd->device->id,
cmd->device->channel + 'A',
- cmd->device->lun, pending_scb->hscb->tag,
+ cmd->device->lun,
+ pending_scb->hscb->tag,
ROLE_INITIATOR, /*status*/0,
SEARCH_COUNT) > 0) {
disconnected = FALSE;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index ee05e8410754..0fc14dac7070 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -225,10 +225,9 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahc_get_pci_bus(pci),
ahc_get_pci_slot(pci),
ahc_get_pci_function(pci));
- name = kmalloc(strlen(buf) + 1, GFP_ATOMIC);
+ name = kstrdup(buf, GFP_ATOMIC);
if (name == NULL)
return (-ENOMEM);
- strcpy(name, buf);
ahc = ahc_alloc(NULL, name);
if (ahc == NULL)
return (-ENOMEM);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 383a3d11652d..64eec6c07a83 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -175,7 +175,7 @@ ahc_dump_device_state(struct seq_file *m, struct scsi_device *sdev)
seq_printf(m, "\tChannel %c Target %d Lun %d Settings\n",
sdev->sdev_target->channel + 'A',
- sdev->sdev_target->id, sdev->lun);
+ sdev->sdev_target->id, (u8)sdev->lun);
seq_printf(m, "\t\tCommands Queued %ld\n", dev->commands_issued);
seq_printf(m, "\t\tCommands Active %d\n", dev->active);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 652b41b4ddbd..b13764ca23fd 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2335,7 +2335,7 @@ static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
" poll command abort successfully \n"
, acb->host->host_no
, ccb->pcmd->device->id
- , ccb->pcmd->device->lun
+ , (u32)ccb->pcmd->device->lun
, ccb);
ccb->pcmd->result = DID_ABORT << 16;
arcmsr_ccb_complete(ccb);
@@ -2399,7 +2399,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
" poll command abort successfully \n"
,acb->host->host_no
,ccb->pcmd->device->id
- ,ccb->pcmd->device->lun
+ ,(u32)ccb->pcmd->device->lun
,ccb);
ccb->pcmd->result = DID_ABORT << 16;
arcmsr_ccb_complete(ccb);
@@ -2456,7 +2456,7 @@ polling_hbc_ccb_retry:
" poll command abort successfully \n"
, acb->host->host_no
, pCCB->pcmd->device->id
- , pCCB->pcmd->device->lun
+ , (u32)pCCB->pcmd->device->lun
, pCCB);
pCCB->pcmd->result = DID_ABORT << 16;
arcmsr_ccb_complete(pCCB);
@@ -3058,7 +3058,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
int rtn = FAILED;
printk(KERN_NOTICE
"arcmsr%d: abort device command of scsi id = %d lun = %d \n",
- acb->host->host_no, cmd->device->id, cmd->device->lun);
+ acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
acb->acb_flags |= ACB_F_ABORT;
acb->num_aborts++;
/*
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 2e797a367608..d89b9b4deb3c 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -760,7 +760,8 @@ intr_ret_t acornscsi_kick(AS_Host *host)
SCpnt->tag = SCpnt->device->current_tag;
} else
#endif
- set_bit(SCpnt->device->id * 8 + SCpnt->device->lun, host->busyluns);
+ set_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x07), host->busyluns);
host->stats.removes += 1;
@@ -863,7 +864,8 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
if (!SCpnt->scsi_done)
panic("scsi%d.H: null scsi_done function in acornscsi_done", host->host->host_no);
- clear_bit(SCpnt->device->id * 8 + SCpnt->device->lun, host->busyluns);
+ clear_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), host->busyluns);
SCpnt->scsi_done(SCpnt);
} else
@@ -1576,7 +1578,8 @@ void acornscsi_message(AS_Host *host)
printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
host->host->host_no, acornscsi_target(host));
host->SCpnt->device->simple_tags = 0;
- set_bit(host->SCpnt->device->id * 8 + host->SCpnt->device->lun, host->busyluns);
+ set_bit(host->SCpnt->device->id * 8 +
+ (u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
break;
#endif
case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
@@ -2671,7 +2674,8 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
//#if (DEBUG & DEBUG_ABORT)
printk("clear ");
//#endif
- clear_bit(SCpnt->device->id * 8 + SCpnt->device->lun, host->busyluns);
+ clear_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), host->busyluns);
/*
* We found the command, and cleared it out. Either
@@ -2853,7 +2857,7 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
shost_for_each_device(scd, instance) {
seq_printf(m, "Device/Lun TaggedQ Sync\n");
- seq_printf(m, " %d/%d ", scd->id, scd->lun);
+ seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
seq_printf(m, "%3sabled(%3d) ",
scd->simple_tags ? "en" : "dis",
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index b46a6f6c0eb3..71cfb1e504c4 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -1821,7 +1821,8 @@ static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
SCpnt->tag = SCpnt->device->current_tag;
} else
#endif
- set_bit(SCpnt->device->id * 8 + SCpnt->device->lun, info->busyluns);
+ set_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), info->busyluns);
info->stats.removes += 1;
switch (SCpnt->cmnd[0]) {
@@ -2171,7 +2172,8 @@ static void fas216_done(FAS216_Info *info, unsigned int result)
* status.
*/
info->device[SCpnt->device->id].parity_check = 0;
- clear_bit(SCpnt->device->id * 8 + SCpnt->device->lun, info->busyluns);
+ clear_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), info->busyluns);
fn = (void (*)(FAS216_Info *, struct scsi_cmnd *, unsigned int))SCpnt->host_scribble;
fn(info, SCpnt, result);
@@ -2398,7 +2400,8 @@ static enum res_find fas216_find_command(FAS216_Info *info,
* been set.
*/
info->origSCpnt = NULL;
- clear_bit(SCpnt->device->id * 8 + SCpnt->device->lun, info->busyluns);
+ clear_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), info->busyluns);
printk("waiting for execution ");
res = res_success;
} else
@@ -3000,7 +3003,7 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
shost_for_each_device(scd, info->host) {
dev = &info->device[scd->id];
- seq_printf(m, " %d/%d ", scd->id, scd->lun);
+ seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
seq_printf(m, "%3sabled(%3d) ",
scd->simple_tags ? "en" : "dis",
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
index cb11ccef54e5..3441ce3ebabf 100644
--- a/drivers/scsi/arm/queue.c
+++ b/drivers/scsi/arm/queue.c
@@ -167,7 +167,8 @@ struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude)
spin_lock_irqsave(&queue->queue_lock, flags);
list_for_each(l, &queue->head) {
QE_t *q = list_entry(l, QE_t, list);
- if (!test_bit(q->SCpnt->device->id * 8 + q->SCpnt->device->lun, exclude)) {
+ if (!test_bit(q->SCpnt->device->id * 8 +
+ (u8)(q->SCpnt->device->lun & 0x7), exclude)) {
SCpnt = __queue_remove(queue, l);
break;
}
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 1814aa20b724..79e6f045c2a9 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -361,17 +361,18 @@ static void __init init_tags(void)
static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
{
+ u8 lun = cmd->device->lun;
SETUP_HOSTDATA(cmd->device->host);
- if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))
+ if (hostdata->busy[cmd->device->id] & (1 << lun))
return 1;
if (!should_be_tagged ||
!setup_use_tagged_queuing || !cmd->device->tagged_supported)
return 0;
- if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
- TagAlloc[cmd->device->id][cmd->device->lun].queue_size) {
+ if (TagAlloc[cmd->device->id][lun].nr_allocated >=
+ TagAlloc[cmd->device->id][lun].queue_size) {
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
- H_NO(cmd), cmd->device->id, cmd->device->lun);
+ H_NO(cmd), cmd->device->id, lun);
return 1;
}
return 0;
@@ -385,6 +386,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
{
+ u8 lun = cmd->device->lun;
SETUP_HOSTDATA(cmd->device->host);
/* If we or the target don't support tagged queuing, allocate the LUN for
@@ -393,11 +395,11 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
if (!should_be_tagged ||
!setup_use_tagged_queuing || !cmd->device->tagged_supported) {
cmd->tag = TAG_NONE;
- hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] |= (1 << lun);
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
- "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun);
+ "command\n", H_NO(cmd), cmd->device->id, lun);
} else {
- TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
+ TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun];
cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
set_bit(cmd->tag, ta->allocated);
@@ -405,7 +407,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
"(now %d tags in use)\n",
H_NO(cmd), cmd->tag, cmd->device->id,
- cmd->device->lun, ta->nr_allocated);
+ lun, ta->nr_allocated);
}
}
@@ -416,21 +418,22 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
static void cmd_free_tag(Scsi_Cmnd *cmd)
{
+ u8 lun = cmd->device->lun;
SETUP_HOSTDATA(cmd->device->host);
if (cmd->tag == TAG_NONE) {
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << lun);
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
- H_NO(cmd), cmd->device->id, cmd->device->lun);
+ H_NO(cmd), cmd->device->id, lun);
} else if (cmd->tag >= MAX_TAGS) {
printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
H_NO(cmd), cmd->tag);
} else {
- TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
+ TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun];
clear_bit(cmd->tag, ta->allocated);
ta->nr_allocated--;
dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
- H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun);
+ H_NO(cmd), cmd->tag, cmd->device->id, lun);
}
}
@@ -713,7 +716,7 @@ static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd)
{
int i, s;
unsigned char *command;
- printk("scsi%d: destination target %d, lun %d\n",
+ printk("scsi%d: destination target %d, lun %llu\n",
H_NO(cmd), cmd->device->id, cmd->device->lun);
printk(KERN_CONT " command = ");
command = cmd->cmnd;
@@ -759,7 +762,7 @@ static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m)
{
int i, s;
unsigned char *command;
- seq_printf(m, "scsi%d: destination target %d, lun %d\n",
+ seq_printf(m, "scsi%d: destination target %d, lun %llu\n",
H_NO(cmd), cmd->device->id, cmd->device->lun);
seq_printf(m, " command = ");
command = cmd->cmnd;
@@ -1060,12 +1063,13 @@ static void NCR5380_main(struct work_struct *work)
#endif
for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) {
+ u8 lun = tmp->device->lun;
#if (NDEBUG & NDEBUG_LISTS)
if (prev != tmp)
- printk("MAIN tmp=%p target=%d busy=%d lun=%d\n",
+ printk("MAIN tmp=%p target=%d busy=%d lun=%llu\n",
tmp, tmp->device->id, hostdata->busy[tmp->device->id],
- tmp->device->lun);
+ lun);
#endif
/* When we find one, remove it from the issue queue. */
/* ++guenther: possible race with Falcon locking */
@@ -1073,7 +1077,7 @@ static void NCR5380_main(struct work_struct *work)
#ifdef SUPPORT_TAGS
!is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE)
#else
- !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))
+ !(hostdata->busy[tmp->device->id] & (1 << lun))
#endif
) {
/* ++guenther: just to be sure, this must be atomic */
@@ -1099,7 +1103,7 @@ static void NCR5380_main(struct work_struct *work)
*/
dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
"lun %d removed from issue_queue\n",
- HOSTNO, tmp->device->id, tmp->device->lun);
+ HOSTNO, tmp->device->id, lun);
/*
* REQUEST SENSE commands are issued without tagged
* queueing, even on SCSI-II devices because the
@@ -2061,7 +2065,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* accesses to this device will use the
* polled-IO. */
printk(KERN_NOTICE "scsi%d: switching target %d "
- "lun %d to slow handshake\n", HOSTNO,
+ "lun %llu to slow handshake\n", HOSTNO,
cmd->device->id, cmd->device->lun);
cmd->device->borken = 1;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
@@ -2113,7 +2117,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command "
"complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
/* Enable reselect interrupts */
@@ -2125,7 +2129,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
*/
if (!cmd->next_link) {
- printk(KERN_NOTICE "scsi%d: target %d lun %d "
+ printk(KERN_NOTICE "scsi%d: target %d lun %llu "
"linked command complete, no next_link\n",
HOSTNO, cmd->device->id, cmd->device->lun);
sink = 1;
@@ -2138,7 +2142,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
* and don't free it! */
cmd->next_link->tag = cmd->tag;
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request "
"done, calling scsi_done().\n",
HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef NCR5380_STATS
@@ -2155,7 +2159,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* ++guenther: possible race with Falcon locking */
falcon_dont_release++;
hostdata->connected = NULL;
- dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu "
"completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef SUPPORT_TAGS
cmd_free_tag(cmd);
@@ -2169,7 +2173,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
/* ++Andreas: the mid level code knows about
QUEUE_FULL now. */
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
- dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned "
"QUEUE_FULL after %d commands\n",
HOSTNO, cmd->device->id, cmd->device->lun,
ta->nr_allocated);
@@ -2267,7 +2271,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->device->tagged_supported = 0;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
cmd->tag = TAG_NONE;
- dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected "
"QUEUE_TAG message; tagged queuing "
"disabled\n",
HOSTNO, cmd->device->id, cmd->device->lun);
@@ -2284,7 +2288,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->disconnected_queue = cmd;
local_irq_restore(flags);
- dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was "
"moved from connected to the "
"disconnected_queue\n", HOSTNO,
cmd->device->id, cmd->device->lun);
@@ -2385,12 +2389,12 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
printk("\n");
} else if (tmp != EXTENDED_MESSAGE)
printk(KERN_DEBUG "scsi%d: rejecting unknown "
- "message %02x from target %d, lun %d\n",
+ "message %02x from target %d, lun %llu\n",
HOSTNO, tmp, cmd->device->id, cmd->device->lun);
else
printk(KERN_DEBUG "scsi%d: rejecting unknown "
"extended message "
- "code %02x, length %d from target %d, lun %d\n",
+ "code %02x, length %d from target %d, lun %llu\n",
HOSTNO, extended_msg[1], extended_msg[0],
cmd->device->id, cmd->device->lun);
@@ -2588,7 +2592,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
hostdata->connected = tmp;
- dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+ dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n",
HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
falcon_dont_release--;
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index fd284ff36ecf..86162811812d 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -914,7 +914,7 @@ void beiscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
stats->r2t_pdus = conn->r2t_pdus_cnt;
stats->digest_err = 0;
stats->timeout_err = 0;
- stats->custom_length = 0;
+ stats->custom_length = 1;
strcpy(stats->custom[0].desc, "eh_abort_cnt");
stats->custom[0].value = conn->eh_abort_cnt;
}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 56467df3d6de..eb3e3e619155 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3538,10 +3538,9 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
+ mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
if (!mem->va)
return -ENOMEM;
- memset(mem->va, 0, mem->size);
return 0;
}
@@ -4320,9 +4319,9 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
"BM_%d : No boot session\n");
return ret;
}
- nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
- sizeof(*session_resp),
- &nonemb_cmd.dma);
+ nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
+ sizeof(*session_resp),
+ &nonemb_cmd.dma);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
@@ -4332,7 +4331,6 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
return -ENOMEM;
}
- memset(nonemb_cmd.va, 0, sizeof(*session_resp));
tag = mgmt_get_session_info(phba, s_handle,
&nonemb_cmd);
if (!tag) {
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 07934b0b9ee1..665afcb74a56 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -900,13 +900,12 @@ free_cmd:
static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
int iscsi_cmd, int size)
{
- cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
+ cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
if (!cmd->va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to allocate memory for if info\n");
return -ENOMEM;
}
- memset(cmd->va, 0, size);
cmd->size = size;
be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
return 0;
@@ -1015,7 +1014,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
if (if_info->dhcp_state) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : DHCP Already Enabled\n");
- return 0;
+ goto exit;
}
/* The ip_param->len is 1 in DHCP case. Setting
proper IP len as this it is used while
@@ -1033,7 +1032,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
sizeof(*reldhcp));
if (rc)
- return rc;
+ goto exit;
reldhcp = nonemb_cmd.va;
reldhcp->interface_hndl = phba->interface_handle;
@@ -1044,7 +1043,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_CONFIG,
"BG_%d : Failed to Delete existing dhcp\n");
- return rc;
+ goto exit;
}
}
}
@@ -1054,7 +1053,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,
IP_ACTION_DEL);
if (rc)
- return rc;
+ goto exit;
}
/* Delete the Gateway settings if mode change is to DHCP */
@@ -1064,7 +1063,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
if (rc) {
beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to Get Gateway Addr\n");
- return rc;
+ goto exit;
}
if (gtway_addr_set.ip_addr.addr[0]) {
@@ -1076,7 +1075,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
beiscsi_log(phba, KERN_WARNING,
BEISCSI_LOG_CONFIG,
"BG_%d : Failed to clear Gateway Addr Set\n");
- return rc;
+ goto exit;
}
}
}
@@ -1087,7 +1086,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
sizeof(*dhcpreq));
if (rc)
- return rc;
+ goto exit;
dhcpreq = nonemb_cmd.va;
dhcpreq->flags = BLOCKING;
@@ -1095,12 +1094,14 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
dhcpreq->interface_hndl = phba->interface_handle;
dhcpreq->ip_type = BE2_DHCP_V4;
- return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
+ rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
} else {
- return mgmt_static_ip_modify(phba, if_info, ip_param,
+ rc = mgmt_static_ip_modify(phba, if_info, ip_param,
subnet_param, IP_ACTION_ADD);
}
+exit:
+ kfree(if_info);
return rc;
}
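Note: the mgmt_set_ip() change turns every early return into a jump to a single exit label so the if_info buffer fetched at the top of the function is always freed. A minimal sketch of the idiom; the step helpers are hypothetical stand-ins:

#include <linux/slab.h>

/* Hypothetical configuration steps standing in for the firmware calls. */
static int first_step(void *info)  { return 0; }
static int second_step(void *info) { return 0; }

/* One exit label owns the kfree(), so no error path can leak the buffer. */
static int configure_with_buffer(void)
{
	void *info = kzalloc(64, GFP_KERNEL);
	int rc;

	if (!info)
		return -ENOMEM;

	rc = first_step(info);
	if (rc)
		goto exit;
	rc = second_step(info);
exit:
	kfree(info);
	return rc;
}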
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index a3ab5cce4208..0f19455951ec 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -81,7 +81,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
bfa->fcs = BFA_TRUE;
fcbuild_init();
- for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
mod = &fcs_modules[i];
if (mod->attach)
mod->attach(fcs);
@@ -97,7 +97,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
int i;
struct bfa_fcs_mod_s *mod;
- for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
mod = &fcs_modules[i];
if (mod->modinit)
mod->modinit(fcs);
@@ -184,7 +184,7 @@ bfa_fcs_exit(struct bfa_fcs_s *fcs)
bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);
- nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
+ nmods = ARRAY_SIZE(fcs_modules);
for (i = 0; i < nmods; i++) {
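
The bfa_fcs.c hunks are a mechanical conversion from the open-coded sizeof(arr)/sizeof(arr[0]) idiom to ARRAY_SIZE(). The two forms are equivalent for true arrays; the kernel macro is shorter and additionally rejects pointer arguments at compile time. A standalone illustration using a simplified form of the macro:

    #include <stdio.h>

    /* Simplified version of the kernel macro (the kernel adds a __must_be_array() check). */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    int main(void)
    {
        int fcs_modules[5];     /* stand-in for the driver's module table */

        printf("%zu entries\n", ARRAY_SIZE(fcs_modules));   /* prints 5 */
        return 0;
    }
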
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 2e28392c2fb6..a38aafa030b3 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -72,7 +72,7 @@ struct bfa_sge_s {
} while (0)
#define bfa_swap_words(_x) ( \
- ((_x) << 32) | ((_x) >> 32))
+ ((u64)(_x) << 32) | ((u64)(_x) >> 32))
#ifdef __BIG_ENDIAN
#define bfa_sge_to_be(_x)
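
The bfa_swap_words() fix adds explicit u64 casts before shifting. If the macro is handed a 32-bit quantity (for example a 32-bit dma_addr_t), shifting it by 32 is undefined behaviour in C; with the casts the operands are widened first and the macro reliably exchanges the upper and lower 32-bit halves of a 64-bit value. A small standalone check:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* With the casts, the two 32-bit halves are exchanged even if the argument is narrower. */
    #define bfa_swap_words(_x)  (((uint64_t)(_x) << 32) | ((uint64_t)(_x) >> 32))

    int main(void)
    {
        uint64_t v = 0x1122334455667788ULL;

        printf("%" PRIx64 "\n", bfa_swap_words(v));     /* 5566778811223344 */
        return 0;
    }
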
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 7593b7c1d336..e90a3742f09d 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1219,7 +1219,7 @@ bfad_install_msix_handler(struct bfad_s *bfad)
int
bfad_setup_intr(struct bfad_s *bfad)
{
- int error = 0;
+ int error;
u32 mask = 0, i, num_bit = 0, max_bit = 0;
struct msix_entry msix_entries[MAX_MSIX_ENTRY];
struct pci_dev *pdev = bfad->pcidev;
@@ -1234,34 +1234,24 @@ bfad_setup_intr(struct bfad_s *bfad)
if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
(bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
- error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
- if (error) {
- /* In CT1 & CT2, try to allocate just one vector */
- if (bfa_asic_id_ctc(pdev->device)) {
- printk(KERN_WARNING "bfa %s: trying one msix "
- "vector failed to allocate %d[%d]\n",
- bfad->pci_name, bfad->nvec, error);
- bfad->nvec = 1;
- error = pci_enable_msix(bfad->pcidev,
- msix_entries, bfad->nvec);
- }
+ error = pci_enable_msix_exact(bfad->pcidev,
+ msix_entries, bfad->nvec);
+ /* In CT1 & CT2, try to allocate just one vector */
+ if (error == -ENOSPC && bfa_asic_id_ctc(pdev->device)) {
+ printk(KERN_WARNING "bfa %s: trying one msix "
+ "vector failed to allocate %d[%d]\n",
+ bfad->pci_name, bfad->nvec, error);
+ bfad->nvec = 1;
+ error = pci_enable_msix_exact(bfad->pcidev,
+ msix_entries, 1);
+ }
- /*
- * Only error number of vector is available.
- * We don't have a mechanism to map multiple
- * interrupts into one vector, so even if we
- * can try to request less vectors, we don't
- * know how to associate interrupt events to
- * vectors. Linux doesn't duplicate vectors
- * in the MSIX table for this case.
- */
- if (error) {
- printk(KERN_WARNING "bfad%d: "
- "pci_enable_msix failed (%d), "
- "use line based.\n",
- bfad->inst_no, error);
- goto line_based;
- }
+ if (error) {
+ printk(KERN_WARNING "bfad%d: "
+ "pci_enable_msix_exact failed (%d), "
+ "use line based.\n",
+ bfad->inst_no, error);
+ goto line_based;
}
/* Disable INTX in MSI-X mode */
@@ -1281,20 +1271,18 @@ bfad_setup_intr(struct bfad_s *bfad)
bfad->bfad_flags |= BFAD_MSIX_ON;
- return error;
+ return 0;
}
line_based:
- error = 0;
- if (request_irq
- (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
- BFAD_DRIVER_NAME, bfad) != 0) {
- /* Enable interrupt handler failed */
- return 1;
- }
+ error = request_irq(bfad->pcidev->irq, (irq_handler_t)bfad_intx,
+ BFAD_IRQ_FLAGS, BFAD_DRIVER_NAME, bfad);
+ if (error)
+ return error;
+
bfad->bfad_flags |= BFAD_INTX_ON;
- return error;
+ return 0;
}
void
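
The bfad_setup_intr() rework replaces the deprecated pci_enable_msix(), whose positive "only this many vectors are available" return the old code treated as a plain failure, with pci_enable_msix_exact(), which either allocates exactly the requested count or fails with a negative errno (-ENOSPC when fewer vectors are available). That makes the CT1/CT2 single-vector retry and the INTx fallback explicit. A condensed sketch of the resulting pattern, with illustrative names and assuming the 3.16-era PCI API in which pci_enable_msix_exact() exists:

    #include <linux/pci.h>

    /* Sketch only: error reporting trimmed, names illustrative. */
    static int example_setup_intr(struct pci_dev *pdev,
                                  struct msix_entry *entries, int nvec)
    {
        int error;

        error = pci_enable_msix_exact(pdev, entries, nvec);
        if (error == -ENOSPC && nvec > 1) {
            /* Fewer vectors than requested: retry with a single one. */
            nvec = 1;
            error = pci_enable_msix_exact(pdev, entries, 1);
        }
        if (error)
            return error;   /* caller falls back to INTx via request_irq() */

        return nvec;        /* number of vectors actually in use */
    }
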
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 8994fb857ee9..023b9d42ad9a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -26,7 +26,6 @@ int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
- int rc = 0;
unsigned long flags;
spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -34,7 +33,7 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
- return rc;
+ return 0;
}
init_completion(&bfad->enable_comp);
@@ -43,21 +42,20 @@ bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
wait_for_completion(&bfad->enable_comp);
- return rc;
+ return 0;
}
int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
- int rc = 0;
unsigned long flags;
spin_lock_irqsave(&bfad->bfad_lock, flags);
if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
iocmd->status = BFA_STATUS_OK;
- return rc;
+ return 0;
}
if (bfad->disable_active) {
@@ -74,7 +72,7 @@ bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
bfad->disable_active = BFA_FALSE;
iocmd->status = BFA_STATUS_OK;
- return rc;
+ return 0;
}
static int
@@ -3270,13 +3268,13 @@ bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
/* Allocate dma coherent memory */
buf_info = buf_base;
buf_info->size = payload_len;
- buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
- &buf_info->phys, GFP_KERNEL);
+ buf_info->virt = dma_zalloc_coherent(&bfad->pcidev->dev,
+ buf_info->size, &buf_info->phys,
+ GFP_KERNEL);
if (!buf_info->virt)
goto out_free_mem;
/* copy the linear bsg buffer to buf_info */
- memset(buf_info->virt, 0, buf_info->size);
memcpy(buf_info->virt, payload_kbuf, buf_info->size);
/*
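
The bfad_fcxp_map_sg() hunk, like the be2iscsi, csiostor, eata and hpsa hunks elsewhere in this series, folds an allocate-then-memset pair into the zeroing allocator variant: dma_zalloc_coherent() and pci_zalloc_consistent() hand back memory that is already cleared, so the explicit memset() disappears. A minimal sketch of the before/after shape, with placeholder names and assuming the 3.16-era DMA API that provides the *_zalloc_* wrappers:

    #include <linux/dma-mapping.h>

    /* Sketch only: 'dev' and 'size' stand in for the driver's own values. */
    static void *alloc_zeroed_dma(struct device *dev, size_t size, dma_addr_t *phys)
    {
        /* Before: dma_alloc_coherent(dev, size, phys, GFP_KERNEL) followed by
         * memset(buf, 0, size).  After: one call, memory comes back zeroed. */
        return dma_zalloc_coherent(dev, size, phys, GFP_KERNEL);
    }
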
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index e1f1e3448f98..fe2106c91c08 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -1,3 +1,16 @@
+/* 57xx_hsi_bnx2fc.h: QLogic NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
#ifndef __57XX_FCOE_HSI_LINUX_LE__
#define __57XX_FCOE_HSI_LINUX_LE__
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
index cfcad8bde7cf..f245d543d7b1 100644
--- a/drivers/scsi/bnx2fc/Kconfig
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -1,5 +1,5 @@
config SCSI_BNX2X_FCOE
- tristate "Broadcom NetXtreme II FCoE support"
+ tristate "QLogic NetXtreme II FCoE support"
depends on PCI
select NETDEVICES
select ETHERNET
@@ -8,5 +8,5 @@ config SCSI_BNX2X_FCOE
select LIBFCOE
select CNIC
---help---
- This driver supports FCoE offload for the Broadcom NetXtreme II
+ This driver supports FCoE offload for the QLogic NetXtreme II
devices.
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 6a976657b475..1346e052e03c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -1,8 +1,7 @@
-#ifndef _BNX2FC_H_
-#define _BNX2FC_H_
-/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
+/* bnx2fc.h: QLogic NetXtreme II Linux FCoE offload driver.
*
* Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,6 +10,8 @@
* Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
*/
+#ifndef _BNX2FC_H_
+#define _BNX2FC_H_
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
index dad9924abbbb..e147cc7ee36c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_constants.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -1,3 +1,16 @@
+/* bnx2fc_constants.h: QLogic NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
#ifndef __BNX2FC_CONSTANTS_H_
#define __BNX2FC_CONSTANTS_H_
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c
index 0cbee1b23ee2..d055df01faa5 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c
@@ -1,3 +1,16 @@
+/* bnx2fc_debug.c: QLogic NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
#include "bnx2fc.h"
void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 4808ff99621f..2b9006774f37 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -1,3 +1,16 @@
+/* bnx2fc_debug.h: QLogic NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
#ifndef __BNX2FC_DEBUG__
#define __BNX2FC_DEBUG__
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index b1c9a4f8caee..ca75c7ca2559 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -1,9 +1,10 @@
/*
- * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * bnx2fc_els.c: QLogic NetXtreme II Linux FCoE offload driver.
* This file contains helper routines that handle ELS requests
* and responses.
*
* Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 785d0d71781e..79e5c94107a9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -1,9 +1,10 @@
-/* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver.
+/* bnx2fc_fcoe.c: QLogic NetXtreme II Linux FCoE offload driver.
* This file contains the code that interacts with libfc, libfcoe,
* cnic modules to create FCoE instances, send/receive non-offloaded
* FIP/FCoE packets, listen to link events etc.
*
* Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,12 +27,12 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
static char version[] =
- "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
+ "QLogic NetXtreme II FCoE Driver " DRV_MODULE_NAME \
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
+MODULE_DESCRIPTION("QLogic NetXtreme II BCM57710 FCoE Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -692,7 +693,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
if (!lport->vport)
fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
snprintf(fc_host_symbolic_name(lport->host), 256,
- "%s (Broadcom %s) v%s over %s",
+ "%s (QLogic %s) v%s over %s",
BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION,
interface->netdev->name);
@@ -2775,7 +2776,7 @@ static struct fc_function_template bnx2fc_vport_xport_function = {
*/
static struct scsi_host_template bnx2fc_shost_template = {
.module = THIS_MODULE,
- .name = "Broadcom Offload FCoE Initiator",
+ .name = "QLogic Offload FCoE Initiator",
.queuecommand = bnx2fc_queuecommand,
.eh_abort_handler = bnx2fc_eh_abort, /* abts */
.eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 512aed3ae4f1..c6688d72a846 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1,8 +1,9 @@
-/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
+/* bnx2fc_hwi.c: QLogic NetXtreme II Linux FCoE offload driver.
* This file contains the code that low level functions that interact
* with 57712 FCoE firmware.
*
* Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 7bc47fc7c686..4c5891e66038 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1,7 +1,8 @@
-/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
+/* bnx2fc_io.c: QLogic NetXtreme II Linux FCoE offload driver.
* IO manager and SCSI IO processing.
*
* Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1450,9 +1451,9 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct bnx2fc_rport *tgt = io_req->tgt;
struct bnx2fc_cmd *cmd, *tmp;
- int tm_lun = sc_cmd->device->lun;
+ u64 tm_lun = sc_cmd->device->lun;
+ u64 lun;
int rc = 0;
- int lun;
/* called with tgt_lock held */
BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 6870cf6781d9..c66c708412a6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -1,8 +1,9 @@
-/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
+/* bnx2fc_tgt.c: QLogic NetXtreme II Linux FCoE offload driver.
* Handles operations such as session offload/upload etc, and manages
* session resources such as connection id and qp resources.
*
* Copyright (c) 2008 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 3d33767f2f2c..917534109a50 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,13 +1,15 @@
-/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
+/* 57xx_iscsi_constants.h: QLogic NetXtreme II iSCSI HSI
*
* Copyright (c) 2006 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
- * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
*/
#ifndef __57XX_ISCSI_CONSTANTS_H_
#define __57XX_ISCSI_CONSTANTS_H_
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index 7052a839b0ea..19b3a97dbacd 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,13 +1,15 @@
-/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
+/* 57xx_iscsi_hsi.h: QLogic NetXtreme II iSCSI HSI.
*
* Copyright (c) 2006 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
- * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
*/
#ifndef __57XX_ISCSI_HSI_LINUX_LE__
#define __57XX_ISCSI_HSI_LINUX_LE__
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
index 01cff1894b6d..44ce54e536e5 100644
--- a/drivers/scsi/bnx2i/Kconfig
+++ b/drivers/scsi/bnx2i/Kconfig
@@ -1,5 +1,5 @@
config SCSI_BNX2_ISCSI
- tristate "Broadcom NetXtreme II iSCSI support"
+ tristate "QLogic NetXtreme II iSCSI support"
depends on NET
depends on PCI
select SCSI_ISCSI_ATTRS
@@ -8,5 +8,5 @@ config SCSI_BNX2_ISCSI
select NET_VENDOR_BROADCOM
select CNIC
---help---
- This driver supports iSCSI offload for the Broadcom NetXtreme II
+ This driver supports iSCSI offload for the QLogic NetXtreme II
devices.
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index c73bbcb63c02..ed7f3228e234 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,15 +1,17 @@
-/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
+/* bnx2i.h: QLogic NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
- * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
*/
#ifndef _BNX2I_H_
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index d6d491c2f004..fb072cc5e9fd 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,15 +1,17 @@
-/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
+/* bnx2i_hwi.c: QLogic NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
- * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
*/
#include <linux/gfp.h>
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 80c03b452d61..c8b410c24cf0 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,15 +1,17 @@
-/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
+/* bnx2i.c: QLogic NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
- * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
*/
#include "bnx2i.h"
@@ -18,18 +20,18 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.7.6.2"
-#define DRV_MODULE_RELDATE "Jun 06, 2013"
+#define DRV_MODULE_VERSION "2.7.10.1"
+#define DRV_MODULE_RELDATE "Jul 16, 2014"
static char version[] =
- "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
+ "QLogic NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
"Eddie Wai <eddie.wai@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712"
+MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/57710/57711/57712"
"/57800/57810/57840 iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 166543f7ef55..40e22497d249 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,16 +1,18 @@
/*
- * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
+ * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver.
*
* Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
- * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
*/
#include <linux/slab.h>
@@ -1643,12 +1645,11 @@ static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
stats->r2t_pdus = conn->r2t_pdus_cnt;
stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
- stats->custom_length = 3;
- strcpy(stats->custom[2].desc, "eh_abort_cnt");
- stats->custom[2].value = conn->eh_abort_cnt;
stats->digest_err = 0;
stats->timeout_err = 0;
- stats->custom_length = 0;
+ strcpy(stats->custom[0].desc, "eh_abort_cnt");
+ stats->custom[0].value = conn->eh_abort_cnt;
+ stats->custom_length = 1;
}
@@ -2249,7 +2250,7 @@ static umode_t bnx2i_attr_is_visible(int param_type, int param)
*/
static struct scsi_host_template bnx2i_host_template = {
.module = THIS_MODULE,
- .name = "Broadcom Offload iSCSI Initiator",
+ .name = "QLogic Offload iSCSI Initiator",
.proc_name = "bnx2i",
.queuecommand = iscsi_queuecommand,
.eh_abort_handler = iscsi_eh_abort,
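
The bnx2i_conn_get_stats() hunk fixes an ordering bug: the old code published custom_length = 3, filled only custom[2], and then overwrote custom_length with 0, discarding the eh_abort_cnt counter entirely. The fix records the counter as the single custom entry at index 0 and publishes the count once, afterwards. A standalone illustration against simplified stand-ins for the iSCSI stats structures (not the real kernel definitions):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified stand-ins for the transport's custom-statistics layout. */
    struct custom_stat { char desc[64]; uint64_t value; };
    struct conn_stats  { uint32_t custom_length; struct custom_stat custom[4]; };

    int main(void)
    {
        struct conn_stats stats = { 0 };
        uint32_t eh_abort_cnt = 7;      /* placeholder counter */

        /* Fixed ordering: fill the entry first, then publish the count exactly once. */
        strcpy(stats.custom[0].desc, "eh_abort_cnt");
        stats.custom[0].value = eh_abort_cnt;
        stats.custom_length = 1;

        printf("%u custom stat(s): %s=%llu\n", stats.custom_length,
               stats.custom[0].desc, (unsigned long long)stats.custom[0].value);
        return 0;
    }
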
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index a0a3d9fe61fe..6d56fd60cb2b 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,13 +1,15 @@
-/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
+/* bnx2i_sysfs.c: QLogic NetXtreme II iSCSI driver.
*
* Copyright (c) 2004 - 2013 Broadcom Corporation
+ * Copyright (c) 2014, QLogic Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
- * Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
+ * Maintained by: QLogic-Storage-Upstream@qlogic.com
*/
#include "bnx2i.h"
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 2a323742ce04..ef5ae0d03616 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -84,15 +84,19 @@ static const char * vendor_labels[CH_TYPES-4] = {
};
// module_param_string_array(vendor_labels, NULL, 0444);
+#define ch_printk(prefix, ch, fmt, a...) \
+ sdev_printk(prefix, (ch)->device, "[%s] " fmt, \
+ (ch)->name, ##a)
+
#define DPRINTK(fmt, arg...) \
do { \
if (debug) \
- printk(KERN_DEBUG "%s: " fmt, ch->name, ##arg); \
+ ch_printk(KERN_DEBUG, ch, fmt, ##arg); \
} while (0)
#define VPRINTK(level, fmt, arg...) \
do { \
if (verbose) \
- printk(level "%s: " fmt, ch->name, ##arg); \
+ ch_printk(level, ch, fmt, ##arg); \
} while (0)
/* ------------------------------------------------------------------- */
@@ -196,7 +200,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd,
__scsi_print_command(cmd);
}
- result = scsi_execute_req(ch->device, cmd, direction, buffer,
+ result = scsi_execute_req(ch->device, cmd, direction, buffer,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);
@@ -247,7 +251,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
retry:
memset(cmd,0,sizeof(cmd));
cmd[0] = READ_ELEMENT_STATUS;
- cmd[1] = (ch->device->lun << 5) |
+ cmd[1] = ((ch->device->lun & 0x7) << 5) |
(ch->voltags ? 0x10 : 0) |
ch_elem_to_typecode(ch,elem);
cmd[2] = (elem >> 8) & 0xff;
@@ -283,7 +287,7 @@ ch_init_elem(scsi_changer *ch)
VPRINTK(KERN_INFO, "INITIALIZE ELEMENT STATUS, may take some time ...\n");
memset(cmd,0,sizeof(cmd));
cmd[0] = INITIALIZE_ELEMENT_STATUS;
- cmd[1] = ch->device->lun << 5;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
err = ch_do_scsi(ch, cmd, NULL, 0, DMA_NONE);
VPRINTK(KERN_INFO, "... finished\n");
return err;
@@ -303,7 +307,7 @@ ch_readconfig(scsi_changer *ch)
memset(cmd,0,sizeof(cmd));
cmd[0] = MODE_SENSE;
- cmd[1] = ch->device->lun << 5;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
cmd[2] = 0x1d;
cmd[4] = 255;
result = ch_do_scsi(ch, cmd, buffer, 255, DMA_FROM_DEVICE);
@@ -428,7 +432,7 @@ ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
trans = ch->firsts[CHET_MT];
memset(cmd,0,sizeof(cmd));
cmd[0] = POSITION_TO_ELEMENT;
- cmd[1] = ch->device->lun << 5;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
cmd[2] = (trans >> 8) & 0xff;
cmd[3] = trans & 0xff;
cmd[4] = (elem >> 8) & 0xff;
@@ -447,7 +451,7 @@ ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
trans = ch->firsts[CHET_MT];
memset(cmd,0,sizeof(cmd));
cmd[0] = MOVE_MEDIUM;
- cmd[1] = ch->device->lun << 5;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
cmd[2] = (trans >> 8) & 0xff;
cmd[3] = trans & 0xff;
cmd[4] = (src >> 8) & 0xff;
@@ -470,7 +474,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src,
trans = ch->firsts[CHET_MT];
memset(cmd,0,sizeof(cmd));
cmd[0] = EXCHANGE_MEDIUM;
- cmd[1] = ch->device->lun << 5;
+ cmd[1] = (ch->device->lun & 0x7) << 5;
cmd[2] = (trans >> 8) & 0xff;
cmd[3] = trans & 0xff;
cmd[4] = (src >> 8) & 0xff;
@@ -518,7 +522,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
elem, tag);
memset(cmd,0,sizeof(cmd));
cmd[0] = SEND_VOLUME_TAG;
- cmd[1] = (ch->device->lun << 5) |
+ cmd[1] = ((ch->device->lun & 0x7) << 5) |
ch_elem_to_typecode(ch,elem);
cmd[2] = (elem >> 8) & 0xff;
cmd[3] = elem & 0xff;
@@ -754,7 +758,7 @@ static long ch_ioctl(struct file *file,
voltag_retry:
memset(ch_cmd, 0, sizeof(ch_cmd));
ch_cmd[0] = READ_ELEMENT_STATUS;
- ch_cmd[1] = (ch->device->lun << 5) |
+ ch_cmd[1] = ((ch->device->lun & 0x7) << 5) |
(ch->voltags ? 0x10 : 0) |
ch_elem_to_typecode(ch,elem);
ch_cmd[2] = (elem >> 8) & 0xff;
@@ -924,8 +928,8 @@ static int ch_probe(struct device *dev)
MKDEV(SCSI_CHANGER_MAJOR, ch->minor), ch,
"s%s", ch->name);
if (IS_ERR(class_dev)) {
- printk(KERN_WARNING "ch%d: device_create failed\n",
- ch->minor);
+ sdev_printk(KERN_WARNING, sd, "ch%d: device_create failed\n",
+ ch->minor);
ret = PTR_ERR(class_dev);
goto remove_idr;
}
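
Two things happen in the ch driver above. Logging is funneled through a new ch_printk() wrapper built on sdev_printk(), so each message carries the SCSI device prefix, and every place that folds the LUN into byte 1 of a legacy SCSI-2 CDB now masks it to three bits. The mask matters because sdev->lun is widened to u64 in this series: bits 7-5 of CDB byte 1 can only carry LUNs 0-7, so the value has to be truncated explicitly instead of relying on the old 8-bit type. A standalone illustration of the byte layout (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Legacy SCSI-2 CDBs carry the LUN in bits 7-5 of byte 1. */
    static uint8_t cdb_byte1(uint64_t lun, int voltags, uint8_t typecode)
    {
        return ((lun & 0x7) << 5) | (voltags ? 0x10 : 0) | typecode;
    }

    int main(void)
    {
        printf("0x%02x\n", cdb_byte1(3, 1, 0x2));   /* 0x72: LUN 3, voltag bit, type 2 */
        return 0;
    }
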
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 7494e4bc69cc..86103c8475d8 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1657,7 +1657,7 @@ csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
case FW_SCSI_UNDER_FLOW_ERR:
csio_warn(hw,
"Under-flow error,cmnd:0x%x expected"
- " len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n",
+ " len:0x%x resid:0x%x lun:0x%llx ssn:0x%x\n",
cmnd->cmnd[0], scsi_bufflen(cmnd),
scsi_get_resid(cmnd), cmnd->device->lun,
rn->flowid);
@@ -1957,7 +1957,7 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd)
csio_dbg(hw,
"Request to abort ioreq:%p cmd:%p cdb:%08llx"
- " ssni:0x%x lun:%d iq:0x%x\n",
+ " ssni:0x%x lun:%llu iq:0x%x\n",
ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
@@ -2015,13 +2015,13 @@ inval_scmnd:
/* FW successfully aborted the request */
if (host_byte(cmnd->result) == DID_REQUEUE) {
csio_info(hw,
- "Aborted SCSI command to (%d:%d) serial#:0x%lx\n",
+ "Aborted SCSI command to (%d:%llu) serial#:0x%lx\n",
cmnd->device->id, cmnd->device->lun,
cmnd->serial_number);
return SUCCESS;
} else {
csio_info(hw,
- "Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n",
+ "Failed to abort SCSI command, (%d:%llu) serial#:0x%lx\n",
cmnd->device->id, cmnd->device->lun,
cmnd->serial_number);
return FAILED;
@@ -2100,13 +2100,13 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
if (!rn)
goto fail;
- csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n",
+ csio_dbg(hw, "Request to reset LUN:%llu (ssni:0x%x tgtid:%d)\n",
cmnd->device->lun, rn->flowid, rn->scsi_id);
if (!csio_is_lnode_ready(ln)) {
csio_err(hw,
"LUN reset cannot be issued on non-ready"
- " local node vnpi:0x%x (LUN:%d)\n",
+ " local node vnpi:0x%x (LUN:%llu)\n",
ln->vnp_flowid, cmnd->device->lun);
goto fail;
}
@@ -2126,7 +2126,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
if (fc_remote_port_chkready(rn->rport)) {
csio_err(hw,
"LUN reset cannot be issued on non-ready"
- " remote node ssni:0x%x (LUN:%d)\n",
+ " remote node ssni:0x%x (LUN:%llu)\n",
rn->flowid, cmnd->device->lun);
goto fail;
}
@@ -2168,7 +2168,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
sld.level = CSIO_LEV_LUN;
sld.lnode = ioreq->lnode;
sld.rnode = ioreq->rnode;
- sld.oslun = (uint64_t)cmnd->device->lun;
+ sld.oslun = cmnd->device->lun;
spin_lock_irqsave(&hw->lock, flags);
/* Kick off TM SM on the ioreq */
@@ -2190,7 +2190,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
/* LUN reset timed-out */
if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
- csio_err(hw, "LUN reset (%d:%d) timed out\n",
+ csio_err(hw, "LUN reset (%d:%llu) timed out\n",
cmnd->device->id, cmnd->device->lun);
spin_lock_irq(&hw->lock);
@@ -2203,7 +2203,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
/* LUN reset returned, check cached status */
if (cmnd->SCp.Status != FW_SUCCESS) {
- csio_err(hw, "LUN reset failed (%d:%d), status: %d\n",
+ csio_err(hw, "LUN reset failed (%d:%llu), status: %d\n",
cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
goto fail;
}
@@ -2223,7 +2223,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
/* Aborts may have timed out */
if (retval != 0) {
csio_err(hw,
- "Attempt to abort I/Os during LUN reset of %d"
+ "Attempt to abort I/Os during LUN reset of %llu"
" returned %d\n", cmnd->device->lun, retval);
/* Return I/Os back to active_q */
spin_lock_irq(&hw->lock);
@@ -2234,7 +2234,7 @@ csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
CSIO_INC_STATS(rn, n_lun_rst);
- csio_info(hw, "LUN reset occurred (%d:%d)\n",
+ csio_info(hw, "LUN reset occurred (%d:%llu)\n",
cmnd->device->id, cmnd->device->lun);
return SUCCESS;
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 4255ce264abf..773da14cfa14 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -232,7 +232,7 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
q = wrm->q_arr[free_idx];
- q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart);
+ q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart);
if (!q->vstart) {
csio_err(hw,
"Failed to allocate DMA memory for "
@@ -240,12 +240,6 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
return -1;
}
- /*
- * We need to zero out the contents, importantly for ingress,
- * since we start with a generatiom bit of 1 for ingress.
- */
- memset(q->vstart, 0, qsz);
-
q->type = type;
q->owner = owner;
q->pidx = q->cidx = q->inc_idx = 0;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3d5322d59f15..d65df6dc106f 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -283,7 +283,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);
-int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun,
+int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
unsigned int max_id, struct scsi_host_template *sht,
struct scsi_transport_template *stt)
{
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 8ad73d913f02..b3e6e7541cc5 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -709,7 +709,7 @@ void cxgbi_device_unregister(struct cxgbi_device *);
void cxgbi_device_unregister_all(unsigned int flag);
struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
-int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int,
+int cxgbi_hbas_add(struct cxgbi_device *, u64, unsigned int,
struct scsi_host_template *,
struct scsi_transport_template *);
void cxgbi_hbas_remove(struct cxgbi_device *);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 83d9bf6fa6ca..0c6be0a17f53 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -519,9 +519,7 @@ static struct ParameterData cfg_data[] = {
CFG_PARAM_UNSET,
0,
0x2f,
-#ifdef CONFIG_SCSI_MULTI_LUN
- NAC_SCANLUN |
-#endif
+ NAC_SCANLUN |
NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
/*| NAC_ACTIVE_NEG*/,
NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
@@ -1089,7 +1087,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
struct AdapterCtlBlk *acb =
(struct AdapterCtlBlk *)cmd->device->host->hostdata;
dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
- cmd, cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
/* Assume BAD_TARGET; will be cleared later */
cmd->result = DID_BAD_TARGET << 16;
@@ -1104,7 +1102,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
/* does the specified lun on the specified device exist */
if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
- cmd->device->id, cmd->device->lun);
+ cmd->device->id, (u8)cmd->device->lun);
goto complete;
}
@@ -1113,7 +1111,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
if (!dcb) {
/* should never happen */
dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
- cmd->device->id, cmd->device->lun);
+ cmd->device->id, (u8)cmd->device->lun);
goto complete;
}
@@ -1209,7 +1207,7 @@ static void dump_register_info(struct AdapterCtlBlk *acb,
"cmnd=0x%02x <%02i-%i>\n",
srb, srb->cmd,
srb->cmd->cmnd[0], srb->cmd->device->id,
- srb->cmd->device->lun);
+ (u8)srb->cmd->device->lun);
printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
srb->segment_x, srb->sg_count, srb->sg_index,
srb->total_xfer_length);
@@ -1304,7 +1302,7 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
(struct AdapterCtlBlk *)cmd->device->host->hostdata;
dprintkl(KERN_INFO,
"eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
- cmd, cmd->device->id, cmd->device->lun, cmd);
+ cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
if (timer_pending(&acb->waiting_timer))
del_timer(&acb->waiting_timer);
@@ -1371,7 +1369,7 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
struct DeviceCtlBlk *dcb;
struct ScsiReqBlk *srb;
dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
- cmd, cmd->device->id, cmd->device->lun, cmd);
+ cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
if (!dcb) {
@@ -1607,7 +1605,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
"Out of tags target=<%02i-%i>)\n",
srb->cmd, srb->cmd->device->id,
- srb->cmd->device->lun);
+ (u8)srb->cmd->device->lun);
srb->state = SRB_READY;
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
DO_HWRESELECT);
@@ -1625,7 +1623,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
/*polling:*/
/* Send CDB ..command block ......... */
dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
- srb->cmd, srb->cmd->device->id, srb->cmd->device->lun,
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
srb->cmd->cmnd[0], srb->tag_number);
if (srb->flag & AUTO_REQSENSE) {
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
@@ -2043,7 +2041,7 @@ static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 scsi_status = *pscsi_status;
u32 d_left_counter = 0;
dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
/*
* KG: We need to drain the buffers before we draw any conclusions!
@@ -2173,7 +2171,7 @@ static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
clear_fifo(acb, "data_out_phase1");
/* do prepare before transfer when data out phase */
data_io_transfer(acb, srb, XFERDATAOUT);
@@ -2185,7 +2183,7 @@ static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 scsi_status = *pscsi_status;
dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
/*
* KG: DataIn is much more tricky than DataOut. When the device is finished
@@ -2396,7 +2394,7 @@ static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
data_io_transfer(acb, srb, XFERDATAIN);
}
@@ -2408,7 +2406,7 @@ static void data_io_transfer(struct AdapterCtlBlk *acb,
u8 bval;
dprintkdbg(DBG_0,
"data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
- srb->cmd, srb->cmd->device->id, srb->cmd->device->lun,
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
((io_dir & DMACMD_DIR) ? 'r' : 'w'),
srb->total_xfer_length, srb->sg_index, srb->sg_count);
if (srb == acb->tmp_srb)
@@ -2581,7 +2579,7 @@ static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO); /* get message */
srb->state = SRB_COMPLETED;
@@ -2595,7 +2593,7 @@ static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
u16 *pscsi_status)
{
dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
- srb->cmd, srb->cmd->device->id, srb->cmd->device->lun);
+ srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
srb->state = SRB_STATUS;
DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
@@ -3320,7 +3318,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
int ckc_only = 1;
dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
- srb->cmd->device->id, srb->cmd->device->lun);
+ srb->cmd->device->id, (u8)srb->cmd->device->lun);
dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
scsi_sgtalbe(cmd));
@@ -3500,7 +3498,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
if (srb->total_xfer_length)
dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
"cmnd=0x%02x Missed %i bytes\n",
- cmd, cmd->device->id, cmd->device->lun,
+ cmd, cmd->device->id, (u8)cmd->device->lun,
cmd->cmnd[0], srb->total_xfer_length);
}
@@ -3540,7 +3538,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
dir = p->sc_data_direction;
result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p,
- p->device->id, p->device->lun);
+ p->device->id, (u8)p->device->lun);
srb_going_remove(dcb, srb);
free_tag(dcb, srb);
srb_free_insert(acb, srb);
@@ -3570,7 +3568,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
result = MK_RES(0, did_flag, 0, 0);
printk("W:%p<%02i-%i>", p, p->device->id,
- p->device->lun);
+ (u8)p->device->lun);
srb_waiting_remove(dcb, srb);
srb_free_insert(acb, srb);
p->result = result;
@@ -3679,7 +3677,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
{
struct scsi_cmnd *cmd = srb->cmd;
dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
- cmd, cmd->device->id, cmd->device->lun);
+ cmd, cmd->device->id, (u8)cmd->device->lun);
srb->flag |= AUTO_REQSENSE;
srb->adapter_status = 0;
@@ -4434,15 +4432,10 @@ static void adapter_init_scsi_host(struct Scsi_Host *host)
if (host->max_id - 1 == eeprom->scsi_id)
host->max_id--;
-#ifdef CONFIG_SCSI_MULTI_LUN
if (eeprom->channel_cfg & NAC_SCANLUN)
host->max_lun = 8;
else
host->max_lun = 1;
-#else
- host->max_lun = 1;
-#endif
-
}
@@ -4645,7 +4638,7 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
SPRINTF("irq_level 0x%04x, ", acb->irq_level);
SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);
- SPRINTF("MaxID %i, MaxLUN %i, ", host->max_id, host->max_lun);
+ SPRINTF("MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
SPRINTF("AdapterID %i\n", host->this_id);
SPRINTF("tag_max_num %i", acb->tag_max_num);
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index c0ae8fa57a3b..67283ef418ac 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -459,7 +459,7 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd
* to the device structure. This should be a TEST_UNIT_READY
* command from scan_scsis_single.
*/
- if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
+ if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
cmd->result = (DID_NO_CONNECT << 16);
@@ -579,8 +579,8 @@ static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);
unit = d->pI2o_dev->lct_data.tid;
- seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
- unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
+ seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
+ unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
scsi_device_online(d->pScsi_dev)? "online":"offline");
d = d->next_lun;
}
@@ -1162,7 +1162,7 @@ static void adpt_i2o_delete_hba(adpt_hba* pHba)
}
}
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
+static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
struct adpt_device* d;
@@ -1462,7 +1462,7 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba)
i2o_lct *lct = pHba->lct;
u8 bus_no = 0;
s16 scsi_id;
- s16 scsi_lun;
+ u64 scsi_lun;
u32 buf[10]; // larger than 7, or 8 ...
struct adpt_device* pDev;
@@ -1496,7 +1496,7 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba)
}
bus_no = buf[0]>>16;
scsi_id = buf[1];
- scsi_lun = (buf[2]>>8 )&0xff;
+ scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
continue;
@@ -1571,7 +1571,7 @@ static int adpt_i2o_parse_lct(adpt_hba* pHba)
if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
bus_no = buf[0]>>16;
scsi_id = buf[1];
- scsi_lun = (buf[2]>>8 )&0xff;
+ scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
if(bus_no >= MAX_CHANNEL) { // Something wrong skip it
continue;
}
@@ -2407,8 +2407,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
case I2O_SCSI_DSC_COMMAND_TIMEOUT:
case I2O_SCSI_DSC_NO_ADAPTER:
case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
- printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
+ printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
+ pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
cmd->result = (DID_TIME_OUT << 16);
break;
case I2O_SCSI_DSC_ADAPTER_BUSY:
@@ -2447,8 +2447,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
case I2O_SCSI_DSC_QUEUE_FROZEN:
case I2O_SCSI_DSC_REQUEST_INVALID:
default:
- printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
+ printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
+ pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
hba_status, dev_status, cmd->cmnd[0]);
cmd->result = (DID_ERROR << 16);
break;
@@ -2464,8 +2464,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
cmd->sense_buffer[2] == DATA_PROTECT ){
/* This is to handle an array failed */
cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
+ printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
+ pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
hba_status, dev_status, cmd->cmnd[0]);
}
@@ -2476,8 +2476,8 @@ static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
* for a limitted number of retries.
*/
cmd->result = (DID_TIME_OUT << 16);
- printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
- pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
+ printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
+ pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
}
@@ -2517,7 +2517,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
i2o_lct *lct = pHba->lct;
u8 bus_no = 0;
s16 scsi_id;
- s16 scsi_lun;
+ u64 scsi_lun;
u32 buf[10]; // at least 8 u32's
struct adpt_device* pDev = NULL;
struct i2o_device* pI2o_dev = NULL;
@@ -2564,7 +2564,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
}
scsi_id = buf[1];
- scsi_lun = (buf[2]>>8 )&0xff;
+ scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
pDev = pHba->channel[bus_no].device[scsi_id];
/* da lun */
while(pDev) {
@@ -2633,7 +2633,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
while(pDev) {
if(pDev->scsi_lun == scsi_lun) {
if(!scsi_device_online(pDev->pScsi_dev)) {
- printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
+ printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
pHba->name,bus_no,scsi_id,scsi_lun);
if (pDev->pScsi_dev) {
scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
@@ -2665,7 +2665,7 @@ static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
// in the LCT table
if (pDev->state & DPTI_DEV_UNSCANNED){
pDev->state = DPTI_DEV_OFFLINE;
- printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
+ printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
if (pDev->pScsi_dev) {
scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
}
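
Instead of pulling a single byte out of the I2O query buffer ((buf[2]>>8)&0xff), dpt_i2o now hands the 8-byte SCSI LUN field to scsilun_to_int() and stores the result in a u64, matching the widened sdev->lun. For reference, the conversion folds the eight bytes of the SAM LUN structure into an integer two bytes at a time; a hedged, standalone re-implementation (not the kernel's exact code) looks roughly like this:

    #include <stdint.h>
    #include <stdio.h>

    /* Approximate re-implementation of the scsilun_to_int() byte folding. */
    static uint64_t lun_to_int(const uint8_t scsi_lun[8])
    {
        uint64_t lun = 0;
        int i;

        for (i = 0; i < 8; i += 2)
            lun |= ((uint64_t)scsi_lun[i]     << ((i + 1) * 8)) |
                   ((uint64_t)scsi_lun[i + 1] << (i * 8));
        return lun;
    }

    int main(void)
    {
        const uint8_t single_level_lun5[8] = { 0x00, 0x05 };   /* peripheral addressing, LUN 5 */

        printf("%llu\n", (unsigned long long)lun_to_int(single_level_lun5));    /* 5 */
        return 0;
    }
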
diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h
index aeb046186c84..1fa345ab8ecb 100644
--- a/drivers/scsi/dpti.h
+++ b/drivers/scsi/dpti.h
@@ -184,7 +184,7 @@ struct adpt_device {
u32 block_size;
u8 scsi_channel;
u8 scsi_id;
- u8 scsi_lun;
+ u64 scsi_lun;
u8 state;
u16 tid;
struct i2o_device* pI2o_dev;
@@ -231,7 +231,7 @@ typedef struct _adpt_hba {
u32 sg_tablesize; // Scatter/Gather List Size.
u8 top_scsi_channel;
u8 top_scsi_id;
- u8 top_scsi_lun;
+ u64 top_scsi_lun;
u8 dma64;
i2o_status_block* status_block;
@@ -300,7 +300,7 @@ static s32 adpt_send_nop(adpt_hba*pHba,u32 m);
static void adpt_i2o_delete_hba(adpt_hba* pHba);
static void adpt_inquiry(adpt_hba* pHba);
static void adpt_fail_posted_scbs(adpt_hba* pHba);
-static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun);
+static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun);
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) ;
static int adpt_i2o_online_hba(adpt_hba* pHba);
static void adpt_i2o_post_wait_complete(u32, int);
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index ebf57364df91..813dd5c998e4 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1238,8 +1238,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
struct eata_config *cf;
dma_addr_t cf_dma_addr;
- cf = pci_alloc_consistent(pdev, sizeof(struct eata_config),
- &cf_dma_addr);
+ cf = pci_zalloc_consistent(pdev, sizeof(struct eata_config),
+ &cf_dma_addr);
if (!cf) {
printk
@@ -1249,7 +1249,6 @@ static int port_detect(unsigned long port_base, unsigned int j,
}
/* Set board configuration */
- memset((char *)cf, 0, sizeof(struct eata_config));
cf->len = (ushort) H2DEV16((ushort) 510);
cf->ocena = 1;
@@ -1399,7 +1398,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
if (shost->max_id > 8 || shost->max_lun > 8)
printk
- ("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n",
+ ("%s: wide SCSI support enabled, max_id %u, max_lun %llu.\n",
ha->board_name, shost->max_id, shost->max_lun);
for (i = 0; i <= shost->max_channel; i++)
@@ -2449,7 +2448,7 @@ static irqreturn_t ihdlr(struct Scsi_Host *shost)
"target_status 0x%x, sense key 0x%x.\n",
ha->board_name,
SCpnt->device->channel, SCpnt->device->id,
- SCpnt->device->lun,
+ (u8)SCpnt->device->lun,
spp->target_status, SCpnt->sense_buffer[2]);
ha->target_to[SCpnt->device->id][SCpnt->device->channel] = 0;
diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
index 7d9b54ae7f62..a0dd1b67a467 100644
--- a/drivers/scsi/fnic/fnic_isr.c
+++ b/drivers/scsi/fnic/fnic_isr.c
@@ -257,8 +257,8 @@ int fnic_set_intr_mode(struct fnic *fnic)
fnic->raw_wq_count >= m &&
fnic->wq_copy_count >= o &&
fnic->cq_count >= n + m + o) {
- if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
- n + m + o + 1)) {
+ if (!pci_enable_msix_exact(fnic->pdev, fnic->msix_entry,
+ n + m + o + 1)) {
fnic->rq_count = n;
fnic->raw_wq_count = m;
fnic->wq_copy_count = o;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index ea28b5ca4c73..961bdf5d31cd 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1753,7 +1753,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
tag = sc->request->tag;
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
- "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
+ "Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
CMD_FLAGS(sc) = FNIC_NO_FLAGS;
@@ -2207,7 +2207,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
+ "Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
rport->port_id, sc->device->lun, sc);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
@@ -2224,6 +2224,22 @@ int fnic_device_reset(struct scsi_cmnd *sc)
tag = sc->request->tag;
if (unlikely(tag < 0)) {
+ /*
+ * XXX(hch): current the midlayer fakes up a struct
+ * XXX(hch): currently the midlayer fakes up a struct
+ * request for the explicit reset ioctls, and those
+ * don't have a tag allocated to them. The below
+ * code pokes into midlayer structures to paper over
+ * this design issue, but that won't work for blk-mq.
+ *
+ * Either someone who can actually test the hardware
+ * will have to come up with a similar hack for the
+ * blk-mq case, or we'll have to bite the bullet and
+ * fix the way the EH ioctls work for real, but until
+ * that happens we fail these explicit requests here.
+ */
+ if (shost_use_blk_mq(sc->device->host))
+ goto fnic_device_reset_end;
+
tag = fnic_scsi_host_start_tag(fnic, sc);
if (unlikely(tag == SCSI_NO_TAG))
goto fnic_device_reset_end;
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index a1bc8ca958e1..b331272e93bc 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -768,7 +768,7 @@ static void sprint_command(struct seq_file *m, unsigned char *command)
static void sprint_Scsi_Cmnd(struct seq_file *m, Scsi_Cmnd * cmd)
{
- PRINTP("host number %d destination target %d, lun %d\n" ANDP cmd->device->host->host_no ANDP cmd->device->id ANDP cmd->device->lun);
+ PRINTP("host number %d destination target %d, lun %llu\n" ANDP cmd->device->host->host_no ANDP cmd->device->id ANDP cmd->device->lun);
PRINTP(" command = ");
sprint_command(m, cmd->cmnd);
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3cbb57a8b846..6de80e352871 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -204,18 +204,33 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
struct scsi_host_template *sht = shost->hostt;
int error = -EINVAL;
- printk(KERN_INFO "scsi%d : %s\n", shost->host_no,
+ shost_printk(KERN_INFO, shost, "%s\n",
sht->info ? sht->info(shost) : sht->name);
if (!shost->can_queue) {
- printk(KERN_ERR "%s: can_queue = 0 no longer supported\n",
- sht->name);
+ shost_printk(KERN_ERR, shost,
+ "can_queue = 0 no longer supported\n");
goto fail;
}
+ if (shost_use_blk_mq(shost)) {
+ error = scsi_mq_setup_tags(shost);
+ if (error)
+ goto fail;
+ }
+
+ /*
+ * Note that we allocate the freelist even for the MQ case for now,
+ * as we need a command set aside for scsi_reset_provider. Having
+ * the full host freelist and one command available for that is a
+ * little heavy-handed, but avoids introducing a special allocator
+ * just for this. Eventually the structure of scsi_reset_provider
+ * will need a major overhaul.
+ */
error = scsi_setup_command_freelist(shost);
if (error)
- goto fail;
+ goto out_destroy_tags;
+
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
@@ -226,7 +241,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
error = device_add(&shost->shost_gendev);
if (error)
- goto out;
+ goto out_destroy_freelist;
pm_runtime_set_active(&shost->shost_gendev);
pm_runtime_enable(&shost->shost_gendev);
@@ -279,8 +294,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
device_del(&shost->shost_dev);
out_del_gendev:
device_del(&shost->shost_gendev);
- out:
+ out_destroy_freelist:
scsi_destroy_command_freelist(shost);
+ out_destroy_tags:
+ if (shost_use_blk_mq(shost))
+ scsi_mq_destroy_tags(shost);
fail:
return error;
}
@@ -309,8 +327,13 @@ static void scsi_host_dev_release(struct device *dev)
}
scsi_destroy_command_freelist(shost);
- if (shost->bqt)
- blk_free_tags(shost->bqt);
+ if (shost_use_blk_mq(shost)) {
+ if (shost->tag_set.tags)
+ scsi_mq_destroy_tags(shost);
+ } else {
+ if (shost->bqt)
+ blk_free_tags(shost->bqt);
+ }
kfree(shost->shost_data);
@@ -436,6 +459,8 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
+ shost->use_blk_mq = scsi_use_blk_mq && !shost->hostt->disable_blk_mq;
+
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
@@ -450,8 +475,9 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
if (IS_ERR(shost->ehandler)) {
- printk(KERN_WARNING "scsi%d: error handler thread failed to spawn, error = %ld\n",
- shost->host_no, PTR_ERR(shost->ehandler));
+ shost_printk(KERN_WARNING, shost,
+ "error handler thread failed to spawn, error = %ld\n",
+ PTR_ERR(shost->ehandler));
goto fail_kfree;
}
@@ -584,7 +610,7 @@ EXPORT_SYMBOL(scsi_is_host_device);
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{
if (unlikely(!shost->work_q)) {
- printk(KERN_ERR
+ shost_printk(KERN_ERR, shost,
"ERROR: Scsi host '%s' attempted to queue scsi-work, "
"when no workqueue created.\n", shost->hostt->name);
dump_stack();
@@ -603,7 +629,7 @@ EXPORT_SYMBOL_GPL(scsi_queue_work);
void scsi_flush_work(struct Scsi_Host *shost)
{
if (!shost->work_q) {
- printk(KERN_ERR
+ shost_printk(KERN_ERR, shost,
"ERROR: Scsi host '%s' attempted to flush scsi-work, "
"when no workqueue created.\n", shost->hostt->name);
dump_stack();
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 31184b35370f..6b35d0dfe64c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1708,7 +1708,14 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result |= ei->ScsiStatus;
- /* copy the sense data whether we need to or not. */
+ scsi_set_resid(cmd, ei->ResidualCnt);
+ if (ei->CommandStatus == 0) {
+ cmd_free(h, cp);
+ cmd->scsi_done(cmd);
+ return;
+ }
+
+ /* copy the sense data */
if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
sense_data_size = SCSI_SENSE_BUFFERSIZE;
else
@@ -1717,13 +1724,6 @@ static void complete_scsi_command(struct CommandList *cp)
sense_data_size = ei->SenseLen;
memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
- scsi_set_resid(cmd, ei->ResidualCnt);
-
- if (ei->CommandStatus == 0) {
- cmd_free(h, cp);
- cmd->scsi_done(cmd);
- return;
- }
/* For I/O accelerator commands, copy over some fields to the normal
* CISS header used below for error handling.
@@ -3686,6 +3686,8 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
(((u64) cmd->cmnd[2]) << 8) |
cmd->cmnd[3];
block_cnt = cmd->cmnd[4];
+ if (block_cnt == 0)
+ block_cnt = 256;
break;
case WRITE_10:
is_write = 1;
@@ -3734,7 +3736,6 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
default:
return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
}
- BUG_ON(block_cnt == 0);
last_block = first_block + block_cnt - 1;
/* check for write to non-RAID-0 */
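
Aside (not part of the patch): the READ_6/WRITE_6 change above encodes a SCSI rule rather than a driver quirk — in a 6-byte CDB the transfer length is a single byte, and a value of 0 means 256 blocks, so the old BUG_ON(block_cnt == 0) could trip on a perfectly legal command. A standalone sketch of decoding a 6-byte read/write CDB (field layout per the block command set; names are illustrative):

#include <stdint.h>

/* Decode the LBA and block count from a 6-byte READ/WRITE CDB. */
static void decode_rw6(const uint8_t *cdb, uint64_t *lba, uint32_t *blocks)
{
	*lba = ((uint64_t)(cdb[1] & 0x1f) << 16) |
	       ((uint64_t)cdb[2] << 8) |
	       cdb[3];
	*blocks = cdb[4];
	if (*blocks == 0)		/* 0 encodes 256 blocks */
		*blocks = 256;
}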
@@ -4590,7 +4591,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
return FAILED;
memset(msg, 0, sizeof(msg));
- ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
+ ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
h->scsi_host->host_no, sc->device->channel,
sc->device->id, sc->device->lun);
@@ -4731,23 +4732,21 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
union u64bit temp64;
dma_addr_t cmd_dma_handle, err_dma_handle;
- c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
+ c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
if (c == NULL)
return NULL;
- memset(c, 0, sizeof(*c));
c->cmd_type = CMD_SCSI;
c->cmdindex = -1;
- c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
- &err_dma_handle);
+ c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info),
+ &err_dma_handle);
if (c->err_info == NULL) {
pci_free_consistent(h->pdev,
sizeof(*c), c, cmd_dma_handle);
return NULL;
}
- memset(c->err_info, 0, sizeof(*c->err_info));
INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
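
Aside (not part of the patch): pci_zalloc_consistent() used above is simply the allocate-then-memset idiom folded into one call that returns zeroed coherent memory. A hedged sketch of the before/after shape, with hypothetical helper names:

#include <linux/pci.h>
#include <linux/string.h>

/* Before: allocate, check, then clear by hand. */
static void *demo_alloc_old(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	void *buf = pci_alloc_consistent(pdev, size, dma);

	if (buf)
		memset(buf, 0, size);
	return buf;
}

/* After: one call, memory already zeroed. */
static void *demo_alloc_new(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	return pci_zalloc_consistent(pdev, size, dma);
}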
@@ -5092,7 +5091,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
if (ioc->Request.Type.Direction & XFER_WRITE) {
if (copy_from_user(buff[sg_used], data_ptr, sz)) {
- status = -ENOMEM;
+ status = -EFAULT;
goto cleanup1;
}
} else
@@ -6365,9 +6364,9 @@ static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
u32 driver_support;
-#ifdef CONFIG_X86
- /* Need to enable prefetch in the SCSI core for 6400 in x86 */
driver_support = readl(&(h->cfgtable->driver_support));
+ /* Need to enable prefetch in the SCSI core for 6400 in x86 */
+#ifdef CONFIG_X86
driver_support |= ENABLE_SCSI_PREFETCH;
#endif
driver_support |= ENABLE_UNIT_ATTN;
@@ -6913,8 +6912,12 @@ static int hpsa_offline_devices_ready(struct ctlr_info *h)
d = list_entry(this, struct offline_device_entry,
offline_list);
spin_unlock_irqrestore(&h->offline_device_lock, flags);
- if (!hpsa_volume_offline(h, d->scsi3addr))
+ if (!hpsa_volume_offline(h, d->scsi3addr)) {
+ spin_lock_irqsave(&h->offline_device_lock, flags);
+ list_del(&d->offline_list);
+ spin_unlock_irqrestore(&h->offline_device_lock, flags);
return 1;
+ }
spin_lock_irqsave(&h->offline_device_lock, flags);
}
spin_unlock_irqrestore(&h->offline_device_lock, flags);
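
Aside (not part of the patch): the hunk above re-acquires offline_device_lock before unlinking the entry. list_del() on a lock-protected list must always run with that lock held, otherwise a concurrent writer can corrupt the list. The bare pattern, with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_entry {
	struct list_head node;
	/* ... payload ... */
};

static void demo_remove(spinlock_t *lock, struct demo_entry *e)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_del(&e->node);		/* unlink only while holding the lock */
	spin_unlock_irqrestore(lock, flags);
}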
@@ -6995,8 +6998,10 @@ reinit_after_soft_reset:
/* Allocate and clear per-cpu variable lockup_detected */
h->lockup_detected = alloc_percpu(u32);
- if (!h->lockup_detected)
+ if (!h->lockup_detected) {
+ rc = -ENOMEM;
goto clean1;
+ }
set_lockup_detected_for_all_cpus(h, 0);
rc = hpsa_pci_init(h);
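
Aside (not part of the patch): the hpsa_big_passthru_ioctl hunk earlier in this file swaps -ENOMEM for -EFAULT because copy_from_user() reports a bad user pointer, not an allocation failure — it returns the number of bytes it could not copy, and the conventional translation of a nonzero return is -EFAULT. A minimal sketch of the pattern with an invented helper:

#include <linux/uaccess.h>

/* Copy a user buffer into an already-allocated kernel buffer. */
static int demo_copy_in(void *kbuf, const void __user *ubuf, size_t len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;	/* bad user address, not out of memory */
	return 0;
}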
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index ee196b363d81..dedb62c21b29 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1024,7 +1024,7 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
_req->scp = scp;
- dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%08x-%08x-%08x-%08x) "
+ dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
"req_index=%d, req=%p\n",
scp,
host->host_no, scp->device->channel,
diff --git a/drivers/scsi/ibmvscsi/Makefile b/drivers/scsi/ibmvscsi/Makefile
index cb150d1e5850..3840c64f2966 100644
--- a/drivers/scsi/ibmvscsi/Makefile
+++ b/drivers/scsi/ibmvscsi/Makefile
@@ -1,3 +1,2 @@
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi.o
-obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvstgt.o
obj-$(CONFIG_SCSI_IBMVFC) += ibmvfc.o
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 8dd47689d584..598c42cba5a8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -46,7 +46,7 @@
static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
-static unsigned int max_lun = IBMVFC_MAX_LUN;
+static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(default_timeout,
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
"[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
-module_param_named(max_lun, max_lun, uint, S_IRUGO);
+module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
"[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
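
Aside (not part of the patch): widening max_lun to u64 also requires the matching ullong parameter type, so that the module-parameter accessors (and module_param's type check) operate on the full 64-bit variable. The shape of the change in isolation; the demo_ prefix is illustrative:

#include <linux/module.h>
#include <linux/moduleparam.h>

static u64 demo_max_lun = 0xffffffffULL;
module_param_named(max_lun, demo_max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN (64-bit value)");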
@@ -166,13 +166,13 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
entry->op_code = vfc_cmd->iu.cdb[0];
- entry->scsi_id = vfc_cmd->tgt_scsi_id;
+ entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
entry->tmf_flags = vfc_cmd->iu.tmf_flags;
- entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
+ entry->u.start.xfer_len = be32_to_cpu(vfc_cmd->iu.xfer_len);
break;
case IBMVFC_MAD_FORMAT:
- entry->op_code = mad->opcode;
+ entry->op_code = be32_to_cpu(mad->opcode);
break;
default:
break;
@@ -199,18 +199,18 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
switch (entry->fmt) {
case IBMVFC_CMD_FORMAT:
entry->op_code = vfc_cmd->iu.cdb[0];
- entry->scsi_id = vfc_cmd->tgt_scsi_id;
+ entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
entry->tmf_flags = vfc_cmd->iu.tmf_flags;
- entry->u.end.status = vfc_cmd->status;
- entry->u.end.error = vfc_cmd->error;
+ entry->u.end.status = be16_to_cpu(vfc_cmd->status);
+ entry->u.end.error = be16_to_cpu(vfc_cmd->error);
entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
break;
case IBMVFC_MAD_FORMAT:
- entry->op_code = mad->opcode;
- entry->u.end.status = mad->status;
+ entry->op_code = be32_to_cpu(mad->opcode);
+ entry->u.end.status = be16_to_cpu(mad->status);
break;
default:
break;
@@ -270,14 +270,14 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
{
int err;
struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
- int fc_rsp_len = rsp->fcp_rsp_len;
+ int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
if ((rsp->flags & FCP_RSP_LEN_VALID) &&
((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
rsp->data.info.rsp_code))
return DID_ERROR << 16;
- err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
+ err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
if (err >= 0)
return rsp->scsi_status | (cmd_status[err].result << 16);
return rsp->scsi_status | (DID_ERROR << 16);
@@ -807,7 +807,7 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
evt->cmnd->result = (error_code << 16);
evt->done = ibmvfc_scsi_eh_done;
} else
- evt->xfer_iu->mad_common.status = IBMVFC_MAD_DRIVER_FAILED;
+ evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
list_del(&evt->queue);
del_timer(&evt->timer);
@@ -955,7 +955,7 @@ static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
spin_lock_irqsave(shost->host_lock, flags);
if (vhost->state == IBMVFC_ACTIVE) {
- switch (vhost->login_buf->resp.link_speed / 100) {
+ switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
case 1:
fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
break;
@@ -976,7 +976,7 @@ static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
break;
default:
ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
- vhost->login_buf->resp.link_speed / 100);
+ be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
@@ -1171,21 +1171,21 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
memset(login_info, 0, sizeof(*login_info));
- login_info->ostype = IBMVFC_OS_LINUX;
- login_info->max_dma_len = IBMVFC_MAX_SECTORS << 9;
- login_info->max_payload = sizeof(struct ibmvfc_fcp_cmd_iu);
- login_info->max_response = sizeof(struct ibmvfc_fcp_rsp);
- login_info->partition_num = vhost->partition_number;
- login_info->vfc_frame_version = 1;
- login_info->fcp_version = 3;
- login_info->flags = IBMVFC_FLUSH_ON_HALT;
+ login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
+ login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
+ login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
+ login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
+ login_info->partition_num = cpu_to_be32(vhost->partition_number);
+ login_info->vfc_frame_version = cpu_to_be32(1);
+ login_info->fcp_version = cpu_to_be16(3);
+ login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
if (vhost->client_migrated)
- login_info->flags |= IBMVFC_CLIENT_MIGRATED;
+ login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
- login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
- login_info->capabilities = IBMVFC_CAN_MIGRATE;
- login_info->async.va = vhost->async_crq.msg_token;
- login_info->async.len = vhost->async_crq.size * sizeof(*vhost->async_crq.msgs);
+ login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
+ login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE);
+ login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
+ login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
strncpy(login_info->device_name,
dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
@@ -1225,7 +1225,7 @@ static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
struct ibmvfc_event *evt = &pool->events[i];
atomic_set(&evt->free, 1);
evt->crq.valid = 0x80;
- evt->crq.ioba = pool->iu_token + (sizeof(*evt->xfer_iu) * i);
+ evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
evt->xfer_iu = pool->iu_storage + i;
evt->vhost = vhost;
evt->ext_list = NULL;
@@ -1310,8 +1310,8 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
struct scatterlist *sg;
scsi_for_each_sg(scmd, sg, nseg, i) {
- md[i].va = sg_dma_address(sg);
- md[i].len = sg_dma_len(sg);
+ md[i].va = cpu_to_be64(sg_dma_address(sg));
+ md[i].len = cpu_to_be32(sg_dma_len(sg));
md[i].key = 0;
}
}
@@ -1337,7 +1337,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
sg_mapped = scsi_dma_map(scmd);
if (!sg_mapped) {
- vfc_cmd->flags |= IBMVFC_NO_MEM_DESC;
+ vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
return 0;
} else if (unlikely(sg_mapped < 0)) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
@@ -1346,10 +1346,10 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
}
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
- vfc_cmd->flags |= IBMVFC_WRITE;
+ vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
} else {
- vfc_cmd->flags |= IBMVFC_READ;
+ vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
}
@@ -1358,7 +1358,7 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
return 0;
}
- vfc_cmd->flags |= IBMVFC_SCATTERLIST;
+ vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
if (!evt->ext_list) {
evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
@@ -1374,8 +1374,8 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
- data->va = evt->ext_list_token;
- data->len = sg_mapped * sizeof(struct srp_direct_buf);
+ data->va = cpu_to_be64(evt->ext_list_token);
+ data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
data->key = 0;
return 0;
}
@@ -1404,15 +1404,15 @@ static void ibmvfc_timeout(struct ibmvfc_event *evt)
static int ibmvfc_send_event(struct ibmvfc_event *evt,
struct ibmvfc_host *vhost, unsigned long timeout)
{
- u64 *crq_as_u64 = (u64 *) &evt->crq;
+ __be64 *crq_as_u64 = (__be64 *) &evt->crq;
int rc;
/* Copy the IU into the transfer area */
*evt->xfer_iu = evt->iu;
if (evt->crq.format == IBMVFC_CMD_FORMAT)
- evt->xfer_iu->cmd.tag = (u64)evt;
+ evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
else if (evt->crq.format == IBMVFC_MAD_FORMAT)
- evt->xfer_iu->mad_common.tag = (u64)evt;
+ evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
else
BUG();
@@ -1428,7 +1428,8 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
mb();
- if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
+ if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1])))) {
list_del(&evt->queue);
del_timer(&evt->timer);
@@ -1451,7 +1452,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
evt->cmnd->result = DID_ERROR << 16;
evt->done = ibmvfc_scsi_eh_done;
} else
- evt->xfer_iu->mad_common.status = IBMVFC_MAD_CRQ_ERROR;
+ evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
evt->done(evt);
} else
@@ -1472,7 +1473,7 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
struct scsi_cmnd *cmnd = evt->cmnd;
const char *err = unknown_error;
- int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
+ int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
int logerr = 0;
int rsp_code = 0;
@@ -1526,13 +1527,13 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
struct scsi_cmnd *cmnd = evt->cmnd;
u32 rsp_len = 0;
- u32 sense_len = rsp->fcp_sense_len;
+ u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
if (cmnd) {
- if (vfc_cmd->response_flags & IBMVFC_ADAPTER_RESID_VALID)
- scsi_set_resid(cmnd, vfc_cmd->adapter_resid);
+ if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
+ scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
else if (rsp->flags & FCP_RESID_UNDER)
- scsi_set_resid(cmnd, rsp->fcp_resid);
+ scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
else
scsi_set_resid(cmnd, 0);
@@ -1540,12 +1541,13 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
cmnd->result = ibmvfc_get_err_result(vfc_cmd);
if (rsp->flags & FCP_RSP_LEN_VALID)
- rsp_len = rsp->fcp_rsp_len;
+ rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
- if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
+ if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
+ (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
ibmvfc_relogin(cmnd->device);
if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
@@ -1630,19 +1632,19 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
cmnd->scsi_done = done;
vfc_cmd = &evt->iu.cmd;
memset(vfc_cmd, 0, sizeof(*vfc_cmd));
- vfc_cmd->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
- vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
- vfc_cmd->frame_type = IBMVFC_SCSI_FCP_TYPE;
- vfc_cmd->payload_len = sizeof(vfc_cmd->iu);
- vfc_cmd->resp_len = sizeof(vfc_cmd->rsp);
- vfc_cmd->cancel_key = (unsigned long)cmnd->device->hostdata;
- vfc_cmd->tgt_scsi_id = rport->port_id;
- vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
+ vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
+ vfc_cmd->resp.len = cpu_to_be32(sizeof(vfc_cmd->rsp));
+ vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
+ vfc_cmd->payload_len = cpu_to_be32(sizeof(vfc_cmd->iu));
+ vfc_cmd->resp_len = cpu_to_be32(sizeof(vfc_cmd->rsp));
+ vfc_cmd->cancel_key = cpu_to_be32((unsigned long)cmnd->device->hostdata);
+ vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
+ vfc_cmd->iu.xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
if (scsi_populate_tag_msg(cmnd, tag)) {
- vfc_cmd->task_tag = tag[1];
+ vfc_cmd->task_tag = cpu_to_be64(tag[1]);
switch (tag[0]) {
case MSG_SIMPLE_TAG:
vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
@@ -1732,12 +1734,12 @@ static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
- tmf->common.version = 1;
- tmf->common.opcode = IBMVFC_TMF_MAD;
- tmf->common.length = sizeof(*tmf);
- tmf->scsi_id = port_id;
- tmf->cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
- tmf->my_cancel_key = IBMVFC_INTERNAL_CANCEL_KEY;
+ tmf->common.version = cpu_to_be32(1);
+ tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
+ tmf->common.length = cpu_to_be16(sizeof(*tmf));
+ tmf->scsi_id = cpu_to_be64(port_id);
+ tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
+ tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
rc = ibmvfc_send_event(evt, vhost, default_timeout);
if (rc != 0) {
@@ -1789,10 +1791,10 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
- plogi->common.version = 1;
- plogi->common.opcode = IBMVFC_PORT_LOGIN;
- plogi->common.length = sizeof(*plogi);
- plogi->scsi_id = port_id;
+ plogi->common.version = cpu_to_be32(1);
+ plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
+ plogi->common.length = cpu_to_be16(sizeof(*plogi));
+ plogi->scsi_id = cpu_to_be64(port_id);
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
@@ -1904,26 +1906,26 @@ static int ibmvfc_bsg_request(struct fc_bsg_job *job)
mad = &evt->iu.passthru;
memset(mad, 0, sizeof(*mad));
- mad->common.version = 1;
- mad->common.opcode = IBMVFC_PASSTHRU;
- mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
-
- mad->cmd_ioba.va = (u64)evt->crq.ioba +
- offsetof(struct ibmvfc_passthru_mad, iu);
- mad->cmd_ioba.len = sizeof(mad->iu);
-
- mad->iu.cmd_len = job->request_payload.payload_len;
- mad->iu.rsp_len = job->reply_payload.payload_len;
- mad->iu.flags = fc_flags;
- mad->iu.cancel_key = IBMVFC_PASSTHRU_CANCEL_KEY;
-
- mad->iu.cmd.va = sg_dma_address(job->request_payload.sg_list);
- mad->iu.cmd.len = sg_dma_len(job->request_payload.sg_list);
- mad->iu.rsp.va = sg_dma_address(job->reply_payload.sg_list);
- mad->iu.rsp.len = sg_dma_len(job->reply_payload.sg_list);
- mad->iu.scsi_id = port_id;
- mad->iu.tag = (u64)evt;
- rsp_len = mad->iu.rsp.len;
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
+ mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
+
+ mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
+ offsetof(struct ibmvfc_passthru_mad, iu));
+ mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
+
+ mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
+ mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
+ mad->iu.flags = cpu_to_be32(fc_flags);
+ mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
+
+ mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
+ mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
+ mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
+ mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
+ mad->iu.scsi_id = cpu_to_be64(port_id);
+ mad->iu.tag = cpu_to_be64((u64)evt);
+ rsp_len = be32_to_cpu(mad->iu.rsp.len);
evt->sync_iu = &rsp_iu;
init_completion(&evt->comp);
@@ -1986,15 +1988,15 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
tmf = &evt->iu.cmd;
memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
- tmf->resp.len = sizeof(tmf->rsp);
- tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
- tmf->payload_len = sizeof(tmf->iu);
- tmf->resp_len = sizeof(tmf->rsp);
- tmf->cancel_key = (unsigned long)sdev->hostdata;
- tmf->tgt_scsi_id = rport->port_id;
+ tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
+ tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
+ tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
+ tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
+ tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
+ tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
int_to_scsilun(sdev->lun, &tmf->iu.lun);
- tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+ tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
tmf->iu.tmf_flags = type;
evt->sync_iu = &rsp_iu;
@@ -2020,8 +2022,8 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
rsp_code = fc_rsp->data.info.rsp_code;
sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
- "flags: %x fcp_rsp: %x, scsi_status: %x\n",
- desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+ "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
@@ -2185,19 +2187,19 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
- tmf->common.version = 1;
- tmf->common.opcode = IBMVFC_TMF_MAD;
- tmf->common.length = sizeof(*tmf);
- tmf->scsi_id = rport->port_id;
+ tmf->common.version = cpu_to_be32(1);
+ tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
+ tmf->common.length = cpu_to_be16(sizeof(*tmf));
+ tmf->scsi_id = cpu_to_be64(rport->port_id);
int_to_scsilun(sdev->lun, &tmf->lun);
- if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS))
+ if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) & IBMVFC_CAN_SUPPRESS_ABTS))
type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
if (vhost->state == IBMVFC_ACTIVE)
- tmf->flags = (type | IBMVFC_TMF_LUA_VALID);
+ tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
else
- tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID);
- tmf->cancel_key = (unsigned long)sdev->hostdata;
- tmf->my_cancel_key = (unsigned long)starget->hostdata;
+ tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
+ tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
evt->sync_iu = &rsp;
init_completion(&evt->comp);
@@ -2217,7 +2219,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
wait_for_completion(&evt->comp);
- status = rsp.mad_common.status;
+ status = be16_to_cpu(rsp.mad_common.status);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
@@ -2252,7 +2254,7 @@ static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
unsigned long cancel_key = (unsigned long)key;
if (evt->crq.format == IBMVFC_CMD_FORMAT &&
- evt->iu.cmd.cancel_key == cancel_key)
+ be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
return 1;
return 0;
}
@@ -2316,15 +2318,15 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
tmf = &evt->iu.cmd;
memset(tmf, 0, sizeof(*tmf));
- tmf->resp.va = (u64)evt->crq.ioba + offsetof(struct ibmvfc_cmd, rsp);
- tmf->resp.len = sizeof(tmf->rsp);
- tmf->frame_type = IBMVFC_SCSI_FCP_TYPE;
- tmf->payload_len = sizeof(tmf->iu);
- tmf->resp_len = sizeof(tmf->rsp);
- tmf->cancel_key = (unsigned long)sdev->hostdata;
- tmf->tgt_scsi_id = rport->port_id;
+ tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
+ tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
+ tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
+ tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
+ tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
+ tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
int_to_scsilun(sdev->lun, &tmf->iu.lun);
- tmf->flags = (IBMVFC_NO_MEM_DESC | IBMVFC_TMF);
+ tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
evt->sync_iu = &rsp_iu;
@@ -2380,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
- ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
fc_rsp->scsi_status);
rsp_rc = -EIO;
@@ -2641,14 +2643,14 @@ static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
struct ibmvfc_host *vhost)
{
- const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(crq->event);
+ const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
struct ibmvfc_target *tgt;
ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
" node_name: %llx%s\n", desc->desc, crq->scsi_id, crq->wwpn, crq->node_name,
ibmvfc_get_link_state(crq->link_state));
- switch (crq->event) {
+ switch (be64_to_cpu(crq->event)) {
case IBMVFC_AE_RESUME:
switch (crq->link_state) {
case IBMVFC_AE_LS_LINK_DOWN:
@@ -2691,15 +2693,15 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
list_for_each_entry(tgt, &vhost->targets, queue) {
if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
break;
- if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
+ if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
continue;
- if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
+ if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
continue;
- if (crq->node_name && tgt->ids.node_name != crq->node_name)
+ if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
continue;
- if (tgt->need_login && crq->event == IBMVFC_AE_ELS_LOGO)
+ if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
tgt->logo_rcvd = 1;
- if (!tgt->need_login || crq->event == IBMVFC_AE_ELS_PLOGI) {
+ if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
ibmvfc_reinit_host(vhost);
}
@@ -2730,7 +2732,7 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
{
long rc;
- struct ibmvfc_event *evt = (struct ibmvfc_event *)crq->ioba;
+ struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
switch (crq->valid) {
case IBMVFC_CRQ_INIT_RSP:
@@ -3336,7 +3338,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
- u32 status = rsp->common.status;
+ u32 status = be16_to_cpu(rsp->common.status);
int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
@@ -3347,14 +3349,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
parms->type, parms->flags, parms->service_parms);
if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
- index = ibmvfc_get_prli_rsp(parms->flags);
+ index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
if (prli_rsp[index].logged_in) {
- if (parms->flags & IBMVFC_PRLI_EST_IMG_PAIR) {
+ if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
tgt->need_login = 0;
tgt->ids.roles = 0;
- if (parms->service_parms & IBMVFC_PRLI_TARGET_FUNC)
+ if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
- if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
+ if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
tgt->add_rport = 1;
} else
@@ -3373,17 +3375,18 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
- if ((rsp->status & IBMVFC_VIOS_FAILURE) && rsp->error == IBMVFC_PLOGI_REQUIRED)
+ if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
+ be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else if (tgt->logo_rcvd)
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
- else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error),
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
rsp->status, rsp->error, status);
break;
};
@@ -3414,14 +3417,14 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
evt->tgt = tgt;
prli = &evt->iu.prli;
memset(prli, 0, sizeof(*prli));
- prli->common.version = 1;
- prli->common.opcode = IBMVFC_PROCESS_LOGIN;
- prli->common.length = sizeof(*prli);
- prli->scsi_id = tgt->scsi_id;
+ prli->common.version = cpu_to_be32(1);
+ prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
+ prli->common.length = cpu_to_be16(sizeof(*prli));
+ prli->scsi_id = cpu_to_be64(tgt->scsi_id);
prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
- prli->parms.flags = IBMVFC_PRLI_EST_IMG_PAIR;
- prli->parms.service_parms = IBMVFC_PRLI_INITIATOR_FUNC;
+ prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
+ prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
@@ -3442,7 +3445,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
- u32 status = rsp->common.status;
+ u32 status = be16_to_cpu(rsp->common.status);
int level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
@@ -3472,15 +3475,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
- if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
- ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
- ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
+ ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
+ ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
break;
};
@@ -3512,10 +3515,10 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
evt->tgt = tgt;
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
- plogi->common.version = 1;
- plogi->common.opcode = IBMVFC_PORT_LOGIN;
- plogi->common.length = sizeof(*plogi);
- plogi->scsi_id = tgt->scsi_id;
+ plogi->common.version = cpu_to_be32(1);
+ plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
+ plogi->common.length = cpu_to_be16(sizeof(*plogi));
+ plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
vhost->discovery_threads--;
@@ -3535,7 +3538,7 @@ static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
- u32 status = rsp->common.status;
+ u32 status = be16_to_cpu(rsp->common.status);
vhost->discovery_threads--;
ibmvfc_free_event(evt);
@@ -3585,10 +3588,10 @@ static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
evt->tgt = tgt;
mad = &evt->iu.implicit_logout;
memset(mad, 0, sizeof(*mad));
- mad->common.version = 1;
- mad->common.opcode = IBMVFC_IMPLICIT_LOGOUT;
- mad->common.length = sizeof(*mad);
- mad->old_scsi_id = tgt->scsi_id;
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+ mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
@@ -3616,7 +3619,7 @@ static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
if (memcmp(&mad->fc_iu.response[4], &tgt->ids.node_name,
sizeof(tgt->ids.node_name)))
return 1;
- if (mad->fc_iu.response[6] != tgt->scsi_id)
+ if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
return 1;
return 0;
}
@@ -3631,7 +3634,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
- u32 status = mad->common.status;
+ u32 status = be16_to_cpu(mad->common.status);
u8 fc_reason, fc_explain;
vhost->discovery_threads--;
@@ -3649,10 +3652,10 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
case IBMVFC_MAD_FAILED:
default:
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
- fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
- fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
+ fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
+ fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
+ ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
mad->iu.status, mad->iu.error,
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
@@ -3674,22 +3677,22 @@ static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
memset(mad, 0, sizeof(*mad));
- mad->common.version = 1;
- mad->common.opcode = IBMVFC_PASSTHRU;
- mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
- mad->cmd_ioba.va = (u64)evt->crq.ioba +
- offsetof(struct ibmvfc_passthru_mad, iu);
- mad->cmd_ioba.len = sizeof(mad->iu);
- mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
- mad->iu.rsp_len = sizeof(mad->fc_iu.response);
- mad->iu.cmd.va = (u64)evt->crq.ioba +
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
+ mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
+ mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
+ offsetof(struct ibmvfc_passthru_mad, iu));
+ mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
+ mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
+ mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
+ mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
offsetof(struct ibmvfc_passthru_mad, fc_iu) +
- offsetof(struct ibmvfc_passthru_fc_iu, payload);
- mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
- mad->iu.rsp.va = (u64)evt->crq.ioba +
+ offsetof(struct ibmvfc_passthru_fc_iu, payload));
+ mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
+ mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
offsetof(struct ibmvfc_passthru_mad, fc_iu) +
- offsetof(struct ibmvfc_passthru_fc_iu, response);
- mad->iu.rsp.len = sizeof(mad->fc_iu.response);
+ offsetof(struct ibmvfc_passthru_fc_iu, response));
+ mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
}
/**
@@ -3748,11 +3751,11 @@ static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt)
evt->tgt = tgt;
tmf = &evt->iu.tmf;
memset(tmf, 0, sizeof(*tmf));
- tmf->common.version = 1;
- tmf->common.opcode = IBMVFC_TMF_MAD;
- tmf->common.length = sizeof(*tmf);
- tmf->scsi_id = tgt->scsi_id;
- tmf->cancel_key = tgt->cancel_key;
+ tmf->common.version = cpu_to_be32(1);
+ tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
+ tmf->common.length = cpu_to_be16(sizeof(*tmf));
+ tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
+ tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
rc = ibmvfc_send_event(evt, vhost, default_timeout);
@@ -3794,16 +3797,16 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
ibmvfc_init_passthru(evt);
mad = &evt->iu.passthru;
- mad->iu.flags = IBMVFC_FC_ELS;
- mad->iu.scsi_id = tgt->scsi_id;
- mad->iu.cancel_key = tgt->cancel_key;
+ mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
+ mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
+ mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
- mad->fc_iu.payload[0] = IBMVFC_ADISC;
+ mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
sizeof(vhost->login_buf->resp.port_name));
memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
sizeof(vhost->login_buf->resp.node_name));
- mad->fc_iu.payload[6] = vhost->login_buf->resp.scsi_id & 0x00ffffff;
+ mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
if (timer_pending(&tgt->timer))
mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
@@ -3834,7 +3837,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
struct ibmvfc_target *tgt = evt->tgt;
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
- u32 status = rsp->common.status;
+ u32 status = be16_to_cpu(rsp->common.status);
int level = IBMVFC_DEFAULT_LOG_LEVEL;
vhost->discovery_threads--;
@@ -3842,8 +3845,8 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
switch (status) {
case IBMVFC_MAD_SUCCESS:
tgt_dbg(tgt, "Query Target succeeded\n");
- tgt->new_scsi_id = rsp->scsi_id;
- if (rsp->scsi_id != tgt->scsi_id)
+ tgt->new_scsi_id = be64_to_cpu(rsp->scsi_id);
+ if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
else
ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
@@ -3855,19 +3858,20 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
break;
case IBMVFC_MAD_FAILED:
default:
- if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
- rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
- rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
+ if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
+ be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
+ be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
- else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
else
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
- ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
- ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
+ rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
+ rsp->fc_explain, status);
break;
};
@@ -3897,10 +3901,10 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
query_tgt = &evt->iu.query_tgt;
memset(query_tgt, 0, sizeof(*query_tgt));
- query_tgt->common.version = 1;
- query_tgt->common.opcode = IBMVFC_QUERY_TARGET;
- query_tgt->common.length = sizeof(*query_tgt);
- query_tgt->wwpn = tgt->ids.port_name;
+ query_tgt->common.version = cpu_to_be32(1);
+ query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
+ query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
+ query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
@@ -3971,7 +3975,8 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
rc = ibmvfc_alloc_target(vhost,
- vhost->disc_buf->scsi_id[i] & IBMVFC_DISC_TGT_SCSI_ID_MASK);
+ be32_to_cpu(vhost->disc_buf->scsi_id[i]) &
+ IBMVFC_DISC_TGT_SCSI_ID_MASK);
return rc;
}
@@ -3985,19 +3990,20 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
- u32 mad_status = rsp->common.status;
+ u32 mad_status = be16_to_cpu(rsp->common.status);
int level = IBMVFC_DEFAULT_LOG_LEVEL;
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
- vhost->num_targets = rsp->num_written;
+ vhost->num_targets = be32_to_cpu(rsp->num_written);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
break;
case IBMVFC_MAD_FAILED:
level += ibmvfc_retry_host_init(vhost);
ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ rsp->status, rsp->error);
break;
case IBMVFC_MAD_DRIVER_FAILED:
break;
@@ -4024,12 +4030,12 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.discover_targets;
memset(mad, 0, sizeof(*mad));
- mad->common.version = 1;
- mad->common.opcode = IBMVFC_DISC_TARGETS;
- mad->common.length = sizeof(*mad);
- mad->bufflen = vhost->disc_buf_sz;
- mad->buffer.va = vhost->disc_buf_dma;
- mad->buffer.len = vhost->disc_buf_sz;
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+ mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
+ mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
+ mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
@@ -4046,7 +4052,7 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
- u32 mad_status = evt->xfer_iu->npiv_login.common.status;
+ u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
unsigned int npiv_max_sectors;
int level = IBMVFC_DEFAULT_LOG_LEVEL;
@@ -4056,12 +4062,13 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
ibmvfc_free_event(evt);
break;
case IBMVFC_MAD_FAILED:
- if (ibmvfc_retry_cmd(rsp->status, rsp->error))
+ if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
level += ibmvfc_retry_host_init(vhost);
else
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
- ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
+ ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+ rsp->status, rsp->error);
ibmvfc_free_event(evt);
return;
case IBMVFC_MAD_CRQ_ERROR:
@@ -4078,7 +4085,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
vhost->client_migrated = 0;
- if (!(rsp->flags & IBMVFC_NATIVE_FC)) {
+ if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
rsp->flags);
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
@@ -4086,7 +4093,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
return;
}
- if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
+ if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
rsp->max_cmds);
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
@@ -4095,27 +4102,27 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
}
vhost->logged_in = 1;
- npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
+ npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
rsp->partition_name, rsp->device_name, rsp->port_loc_code,
rsp->drc_name, npiv_max_sectors);
- fc_host_fabric_name(vhost->host) = rsp->node_name;
- fc_host_node_name(vhost->host) = rsp->node_name;
- fc_host_port_name(vhost->host) = rsp->port_name;
- fc_host_port_id(vhost->host) = rsp->scsi_id;
+ fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
+ fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
+ fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
+ fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
fc_host_supported_classes(vhost->host) = 0;
- if (rsp->service_parms.class1_parms[0] & 0x80000000)
+ if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
- if (rsp->service_parms.class2_parms[0] & 0x80000000)
+ if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
- if (rsp->service_parms.class3_parms[0] & 0x80000000)
+ if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
fc_host_maxframe_size(vhost->host) =
- rsp->service_parms.common.bb_rcv_sz & 0x0fff;
+ be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
- vhost->host->can_queue = rsp->max_cmds - IBMVFC_NUM_INTERNAL_REQ;
+ vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
vhost->host->max_sectors = npiv_max_sectors;
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
wake_up(&vhost->work_wait_q);
@@ -4138,11 +4145,11 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
mad = &evt->iu.npiv_login;
memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
- mad->common.version = 1;
- mad->common.opcode = IBMVFC_NPIV_LOGIN;
- mad->common.length = sizeof(struct ibmvfc_npiv_login_mad);
- mad->buffer.va = vhost->login_buf_dma;
- mad->buffer.len = sizeof(*vhost->login_buf);
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
+ mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
+ mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
+ mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
@@ -4160,7 +4167,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
{
struct ibmvfc_host *vhost = evt->vhost;
- u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
+ u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
ibmvfc_free_event(evt);
@@ -4199,9 +4206,9 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
mad = &evt->iu.npiv_logout;
memset(mad, 0, sizeof(*mad));
- mad->common.version = 1;
- mad->common.opcode = IBMVFC_NPIV_LOGOUT;
- mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
+ mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
@@ -4343,14 +4350,14 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
if (rport) {
tgt_dbg(tgt, "rport add succeeded\n");
tgt->rport = rport;
- rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
+ rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
rport->supported_classes = 0;
tgt->target_id = rport->scsi_target_id;
- if (tgt->service_parms.class1_parms[0] & 0x80000000)
+ if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
rport->supported_classes |= FC_COS_CLASS1;
- if (tgt->service_parms.class2_parms[0] & 0x80000000)
+ if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
rport->supported_classes |= FC_COS_CLASS2;
- if (tgt->service_parms.class3_parms[0] & 0x80000000)
+ if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
rport->supported_classes |= FC_COS_CLASS3;
if (rport->rqst_q)
blk_queue_max_segments(rport->rqst_q, 1);
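
Aside (not part of the patch): the ibmvfc.c changes above are one large endianness-annotation sweep — every field the firmware interprets is declared __be16/__be32/__be64 in ibmvfc.h (next hunk) and converted with cpu_to_be*()/be*_to_cpu() at each access. The payoff is that sparse, with its endian checking enabled, can then flag any spot that still reads or writes wire data in CPU byte order, because mixing a __be* value with a plain integer is reported as a restricted-type violation. The pattern in isolation, using an invented structure:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_mad {
	__be32 version;		/* wire (big-endian) representation */
	__be16 status;
	__be64 tag;
};

static void demo_fill(struct demo_mad *mad, u64 tag)
{
	mad->version = cpu_to_be32(1);		/* convert on store */
	mad->tag     = cpu_to_be64(tag);
}

static bool demo_succeeded(const struct demo_mad *mad)
{
	return be16_to_cpu(mad->status) == 0;	/* convert on load */
}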
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 017a5290e8c1..8fae03215a85 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -135,12 +135,12 @@ enum ibmvfc_mad_types {
};
struct ibmvfc_mad_common {
- u32 version;
- u32 reserved;
- u32 opcode;
- u16 status;
- u16 length;
- u64 tag;
+ __be32 version;
+ __be32 reserved;
+ __be32 opcode;
+ __be16 status;
+ __be16 length;
+ __be64 tag;
}__attribute__((packed, aligned (8)));
struct ibmvfc_npiv_login_mad {
@@ -155,76 +155,76 @@ struct ibmvfc_npiv_logout_mad {
#define IBMVFC_MAX_NAME 256
struct ibmvfc_npiv_login {
- u32 ostype;
+ __be32 ostype;
#define IBMVFC_OS_LINUX 0x02
- u32 pad;
- u64 max_dma_len;
- u32 max_payload;
- u32 max_response;
- u32 partition_num;
- u32 vfc_frame_version;
- u16 fcp_version;
- u16 flags;
+ __be32 pad;
+ __be64 max_dma_len;
+ __be32 max_payload;
+ __be32 max_response;
+ __be32 partition_num;
+ __be32 vfc_frame_version;
+ __be16 fcp_version;
+ __be16 flags;
#define IBMVFC_CLIENT_MIGRATED 0x01
#define IBMVFC_FLUSH_ON_HALT 0x02
- u32 max_cmds;
- u64 capabilities;
+ __be32 max_cmds;
+ __be64 capabilities;
#define IBMVFC_CAN_MIGRATE 0x01
- u64 node_name;
+ __be64 node_name;
struct srp_direct_buf async;
u8 partition_name[IBMVFC_MAX_NAME];
u8 device_name[IBMVFC_MAX_NAME];
u8 drc_name[IBMVFC_MAX_NAME];
- u64 reserved2[2];
+ __be64 reserved2[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_common_svc_parms {
- u16 fcph_version;
- u16 b2b_credit;
- u16 features;
- u16 bb_rcv_sz; /* upper nibble is BB_SC_N */
- u32 ratov;
- u32 edtov;
+ __be16 fcph_version;
+ __be16 b2b_credit;
+ __be16 features;
+ __be16 bb_rcv_sz; /* upper nibble is BB_SC_N */
+ __be32 ratov;
+ __be32 edtov;
}__attribute__((packed, aligned (4)));
struct ibmvfc_service_parms {
struct ibmvfc_common_svc_parms common;
u8 port_name[8];
u8 node_name[8];
- u32 class1_parms[4];
- u32 class2_parms[4];
- u32 class3_parms[4];
- u32 obsolete[4];
- u32 vendor_version[4];
- u32 services_avail[2];
- u32 ext_len;
- u32 reserved[30];
- u32 clk_sync_qos[2];
+ __be32 class1_parms[4];
+ __be32 class2_parms[4];
+ __be32 class3_parms[4];
+ __be32 obsolete[4];
+ __be32 vendor_version[4];
+ __be32 services_avail[2];
+ __be32 ext_len;
+ __be32 reserved[30];
+ __be32 clk_sync_qos[2];
}__attribute__((packed, aligned (4)));
struct ibmvfc_npiv_login_resp {
- u32 version;
- u16 status;
- u16 error;
- u32 flags;
+ __be32 version;
+ __be16 status;
+ __be16 error;
+ __be32 flags;
#define IBMVFC_NATIVE_FC 0x01
- u32 reserved;
- u64 capabilities;
+ __be32 reserved;
+ __be64 capabilities;
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
#define IBMVFC_CAN_SUPPRESS_ABTS 0x10
- u32 max_cmds;
- u32 scsi_id_sz;
- u64 max_dma_len;
- u64 scsi_id;
- u64 port_name;
- u64 node_name;
- u64 link_speed;
+ __be32 max_cmds;
+ __be32 scsi_id_sz;
+ __be64 max_dma_len;
+ __be64 scsi_id;
+ __be64 port_name;
+ __be64 node_name;
+ __be64 link_speed;
u8 partition_name[IBMVFC_MAX_NAME];
u8 device_name[IBMVFC_MAX_NAME];
u8 port_loc_code[IBMVFC_MAX_NAME];
u8 drc_name[IBMVFC_MAX_NAME];
struct ibmvfc_service_parms service_parms;
- u64 reserved2;
+ __be64 reserved2;
}__attribute__((packed, aligned (8)));
union ibmvfc_npiv_login_data {
@@ -233,20 +233,20 @@ union ibmvfc_npiv_login_data {
}__attribute__((packed, aligned (8)));
struct ibmvfc_discover_targets_buf {
- u32 scsi_id[1];
+ __be32 scsi_id[1];
#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff
};
struct ibmvfc_discover_targets {
struct ibmvfc_mad_common common;
struct srp_direct_buf buffer;
- u32 flags;
- u16 status;
- u16 error;
- u32 bufflen;
- u32 num_avail;
- u32 num_written;
- u64 reserved[2];
+ __be32 flags;
+ __be16 status;
+ __be16 error;
+ __be32 bufflen;
+ __be32 num_avail;
+ __be32 num_written;
+ __be64 reserved[2];
}__attribute__((packed, aligned (8)));
enum ibmvfc_fc_reason {
@@ -278,32 +278,32 @@ enum ibmvfc_gs_explain {
struct ibmvfc_port_login {
struct ibmvfc_mad_common common;
- u64 scsi_id;
- u16 reserved;
- u16 fc_service_class;
- u32 blksz;
- u32 hdr_per_blk;
- u16 status;
- u16 error; /* also fc_reason */
- u16 fc_explain;
- u16 fc_type;
- u32 reserved2;
+ __be64 scsi_id;
+ __be16 reserved;
+ __be16 fc_service_class;
+ __be32 blksz;
+ __be32 hdr_per_blk;
+ __be16 status;
+ __be16 error; /* also fc_reason */
+ __be16 fc_explain;
+ __be16 fc_type;
+ __be32 reserved2;
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
- u64 reserved3[2];
+ __be64 reserved3[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_prli_svc_parms {
u8 type;
#define IBMVFC_SCSI_FCP_TYPE 0x08
u8 type_ext;
- u16 flags;
+ __be16 flags;
#define IBMVFC_PRLI_ORIG_PA_VALID 0x8000
#define IBMVFC_PRLI_RESP_PA_VALID 0x4000
#define IBMVFC_PRLI_EST_IMG_PAIR 0x2000
- u32 orig_pa;
- u32 resp_pa;
- u32 service_parms;
+ __be32 orig_pa;
+ __be32 resp_pa;
+ __be32 service_parms;
#define IBMVFC_PRLI_TASK_RETRY 0x00000200
#define IBMVFC_PRLI_RETRY 0x00000100
#define IBMVFC_PRLI_DATA_OVERLAY 0x00000040
@@ -315,47 +315,47 @@ struct ibmvfc_prli_svc_parms {
struct ibmvfc_process_login {
struct ibmvfc_mad_common common;
- u64 scsi_id;
+ __be64 scsi_id;
struct ibmvfc_prli_svc_parms parms;
u8 reserved[48];
- u16 status;
- u16 error; /* also fc_reason */
- u32 reserved2;
- u64 reserved3[2];
+ __be16 status;
+ __be16 error; /* also fc_reason */
+ __be32 reserved2;
+ __be64 reserved3[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_query_tgt {
struct ibmvfc_mad_common common;
- u64 wwpn;
- u64 scsi_id;
- u16 status;
- u16 error;
- u16 fc_explain;
- u16 fc_type;
- u64 reserved[2];
+ __be64 wwpn;
+ __be64 scsi_id;
+ __be16 status;
+ __be16 error;
+ __be16 fc_explain;
+ __be16 fc_type;
+ __be64 reserved[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_implicit_logout {
struct ibmvfc_mad_common common;
- u64 old_scsi_id;
- u64 reserved[2];
+ __be64 old_scsi_id;
+ __be64 reserved[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_tmf {
struct ibmvfc_mad_common common;
- u64 scsi_id;
+ __be64 scsi_id;
struct scsi_lun lun;
- u32 flags;
+ __be32 flags;
#define IBMVFC_TMF_ABORT_TASK 0x02
#define IBMVFC_TMF_ABORT_TASK_SET 0x04
#define IBMVFC_TMF_LUN_RESET 0x10
#define IBMVFC_TMF_TGT_RESET 0x20
#define IBMVFC_TMF_LUA_VALID 0x40
#define IBMVFC_TMF_SUPPRESS_ABTS 0x80
- u32 cancel_key;
- u32 my_cancel_key;
- u32 pad;
- u64 reserved[2];
+ __be32 cancel_key;
+ __be32 my_cancel_key;
+ __be32 pad;
+ __be64 reserved[2];
}__attribute__((packed, aligned (8)));
enum ibmvfc_fcp_rsp_info_codes {
@@ -366,7 +366,7 @@ enum ibmvfc_fcp_rsp_info_codes {
};
struct ibmvfc_fcp_rsp_info {
- u16 reserved;
+ __be16 reserved;
u8 rsp_code;
u8 reserved2[4];
}__attribute__((packed, aligned (2)));
@@ -388,13 +388,13 @@ union ibmvfc_fcp_rsp_data {
}__attribute__((packed, aligned (8)));
struct ibmvfc_fcp_rsp {
- u64 reserved;
- u16 retry_delay_timer;
+ __be64 reserved;
+ __be16 retry_delay_timer;
u8 flags;
u8 scsi_status;
- u32 fcp_resid;
- u32 fcp_sense_len;
- u32 fcp_rsp_len;
+ __be32 fcp_resid;
+ __be32 fcp_sense_len;
+ __be32 fcp_rsp_len;
union ibmvfc_fcp_rsp_data data;
}__attribute__((packed, aligned (8)));
@@ -429,58 +429,58 @@ struct ibmvfc_fcp_cmd_iu {
#define IBMVFC_RDDATA 0x02
#define IBMVFC_WRDATA 0x01
u8 cdb[IBMVFC_MAX_CDB_LEN];
- u32 xfer_len;
+ __be32 xfer_len;
}__attribute__((packed, aligned (4)));
struct ibmvfc_cmd {
- u64 task_tag;
- u32 frame_type;
- u32 payload_len;
- u32 resp_len;
- u32 adapter_resid;
- u16 status;
- u16 error;
- u16 flags;
- u16 response_flags;
+ __be64 task_tag;
+ __be32 frame_type;
+ __be32 payload_len;
+ __be32 resp_len;
+ __be32 adapter_resid;
+ __be16 status;
+ __be16 error;
+ __be16 flags;
+ __be16 response_flags;
#define IBMVFC_ADAPTER_RESID_VALID 0x01
- u32 cancel_key;
- u32 exchange_id;
+ __be32 cancel_key;
+ __be32 exchange_id;
struct srp_direct_buf ext_func;
struct srp_direct_buf ioba;
struct srp_direct_buf resp;
- u64 correlation;
- u64 tgt_scsi_id;
- u64 tag;
- u64 reserved3[2];
+ __be64 correlation;
+ __be64 tgt_scsi_id;
+ __be64 tag;
+ __be64 reserved3[2];
struct ibmvfc_fcp_cmd_iu iu;
struct ibmvfc_fcp_rsp rsp;
}__attribute__((packed, aligned (8)));
struct ibmvfc_passthru_fc_iu {
- u32 payload[7];
+ __be32 payload[7];
#define IBMVFC_ADISC 0x52000000
- u32 response[7];
+ __be32 response[7];
};
struct ibmvfc_passthru_iu {
- u64 task_tag;
- u32 cmd_len;
- u32 rsp_len;
- u16 status;
- u16 error;
- u32 flags;
+ __be64 task_tag;
+ __be32 cmd_len;
+ __be32 rsp_len;
+ __be16 status;
+ __be16 error;
+ __be32 flags;
#define IBMVFC_FC_ELS 0x01
#define IBMVFC_FC_CT_IU 0x02
- u32 cancel_key;
+ __be32 cancel_key;
#define IBMVFC_PASSTHRU_CANCEL_KEY 0x80000000
#define IBMVFC_INTERNAL_CANCEL_KEY 0x80000001
- u32 reserved;
+ __be32 reserved;
struct srp_direct_buf cmd;
struct srp_direct_buf rsp;
- u64 correlation;
- u64 scsi_id;
- u64 tag;
- u64 reserved2[2];
+ __be64 correlation;
+ __be64 scsi_id;
+ __be64 tag;
+ __be64 reserved2[2];
}__attribute__((packed, aligned (8)));
struct ibmvfc_passthru_mad {
@@ -552,7 +552,7 @@ struct ibmvfc_crq {
volatile u8 valid;
volatile u8 format;
u8 reserved[6];
- volatile u64 ioba;
+ volatile __be64 ioba;
}__attribute__((packed, aligned (8)));
struct ibmvfc_crq_queue {
@@ -572,12 +572,12 @@ struct ibmvfc_async_crq {
volatile u8 valid;
u8 link_state;
u8 pad[2];
- u32 pad2;
- volatile u64 event;
- volatile u64 scsi_id;
- volatile u64 wwpn;
- volatile u64 node_name;
- u64 reserved;
+ __be32 pad2;
+ volatile __be64 event;
+ volatile __be64 scsi_id;
+ volatile __be64 wwpn;
+ volatile __be64 node_name;
+ __be64 reserved;
}__attribute__((packed, aligned (8)));
struct ibmvfc_async_crq_queue {
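
The __be16/__be32/__be64 markings above change only the sparse type of each field; the byte layout the firmware sees is untouched, and the driver converts values with the standard byte-order helpers at each access. A minimal sketch of that pattern, on a hypothetical structure rather than the ibmvfc definitions themselves:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical firmware-visible buffer with explicit endianness markings. */
struct fw_login_buf {
	__be32 max_cmds;	/* big-endian on the wire */
	__be64 port_name;
} __attribute__((packed, aligned(8)));

static void fill_login_buf(struct fw_login_buf *buf, u32 max_cmds, u64 wwpn)
{
	/* CPU-endian values are converted exactly once, at assignment. */
	buf->max_cmds  = cpu_to_be32(max_cmds);
	buf->port_name = cpu_to_be64(wwpn);
}

static u32 login_buf_max_cmds(const struct fw_login_buf *buf)
{
	/* ...and converted back exactly once when read. */
	return be32_to_cpu(buf->max_cmds);
}

Building with sparse (make C=1) then flags any assignment that mixes a plain u32/u64 with an annotated field without a conversion, which is the main benefit of the annotation.
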
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
deleted file mode 100644
index 56f8a861ed72..000000000000
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ /dev/null
@@ -1,1001 +0,0 @@
-/*
- * IBM eServer i/pSeries Virtual SCSI Target Driver
- * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
- * Santiago Leon (santil@us.ibm.com) IBM Corp.
- * Linda Xie (lxie@us.ibm.com) IBM Corp.
- *
- * Copyright (C) 2005-2006 FUJITA Tomonori <tomof@acm.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
- * USA
- */
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_transport_srp.h>
-#include <scsi/scsi_tgt.h>
-#include <scsi/libsrp.h>
-#include <asm/hvcall.h>
-#include <asm/iommu.h>
-#include <asm/prom.h>
-#include <asm/vio.h>
-
-#include "ibmvscsi.h"
-
-#define INITIAL_SRP_LIMIT 16
-#define DEFAULT_MAX_SECTORS 256
-
-#define TGT_NAME "ibmvstgt"
-
-/*
- * Hypervisor calls.
- */
-#define h_copy_rdma(l, sa, sb, da, db) \
- plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db)
-#define h_send_crq(ua, l, h) \
- plpar_hcall_norets(H_SEND_CRQ, ua, l, h)
-#define h_reg_crq(ua, tok, sz)\
- plpar_hcall_norets(H_REG_CRQ, ua, tok, sz);
-#define h_free_crq(ua) \
- plpar_hcall_norets(H_FREE_CRQ, ua);
-
-/* tmp - will replace with SCSI logging stuff */
-#define eprintk(fmt, args...) \
-do { \
- printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
-} while (0)
-/* #define dprintk eprintk */
-#define dprintk(fmt, args...)
-
-struct vio_port {
- struct vio_dev *dma_dev;
-
- struct crq_queue crq_queue;
- struct work_struct crq_work;
-
- unsigned long liobn;
- unsigned long riobn;
- struct srp_target *target;
-
- struct srp_rport *rport;
-};
-
-static struct workqueue_struct *vtgtd;
-static struct scsi_transport_template *ibmvstgt_transport_template;
-
-/*
- * These are fixed for the system and come from the Open Firmware device tree.
- * We just store them here to save getting them every time.
- */
-static char system_id[64] = "";
-static char partition_name[97] = "UNKNOWN";
-static unsigned int partition_number = -1;
-
-static struct vio_port *target_to_port(struct srp_target *target)
-{
- return (struct vio_port *) target->ldata;
-}
-
-static inline union viosrp_iu *vio_iu(struct iu_entry *iue)
-{
- return (union viosrp_iu *) (iue->sbuf->buf);
-}
-
-static int send_iu(struct iu_entry *iue, uint64_t length, uint8_t format)
-{
- struct srp_target *target = iue->target;
- struct vio_port *vport = target_to_port(target);
- long rc, rc1;
- union {
- struct viosrp_crq cooked;
- uint64_t raw[2];
- } crq;
-
- /* First copy the SRP */
- rc = h_copy_rdma(length, vport->liobn, iue->sbuf->dma,
- vport->riobn, iue->remote_token);
-
- if (rc)
- eprintk("Error %ld transferring data\n", rc);
-
- crq.cooked.valid = 0x80;
- crq.cooked.format = format;
- crq.cooked.reserved = 0x00;
- crq.cooked.timeout = 0x00;
- crq.cooked.IU_length = length;
- crq.cooked.IU_data_ptr = vio_iu(iue)->srp.rsp.tag;
-
- if (rc == 0)
- crq.cooked.status = 0x99; /* Just needs to be non-zero */
- else
- crq.cooked.status = 0x00;
-
- rc1 = h_send_crq(vport->dma_dev->unit_address, crq.raw[0], crq.raw[1]);
-
- if (rc1) {
- eprintk("%ld sending response\n", rc1);
- return rc1;
- }
-
- return rc;
-}
-
-#define SRP_RSP_SENSE_DATA_LEN 18
-
-static int send_rsp(struct iu_entry *iue, struct scsi_cmnd *sc,
- unsigned char status, unsigned char asc)
-{
- union viosrp_iu *iu = vio_iu(iue);
- uint64_t tag = iu->srp.rsp.tag;
-
- /* If the linked bit is on and status is good */
- if (test_bit(V_LINKED, &iue->flags) && (status == NO_SENSE))
- status = 0x10;
-
- memset(iu, 0, sizeof(struct srp_rsp));
- iu->srp.rsp.opcode = SRP_RSP;
- iu->srp.rsp.req_lim_delta = 1;
- iu->srp.rsp.tag = tag;
-
- if (test_bit(V_DIOVER, &iue->flags))
- iu->srp.rsp.flags |= SRP_RSP_FLAG_DIOVER;
-
- iu->srp.rsp.data_in_res_cnt = 0;
- iu->srp.rsp.data_out_res_cnt = 0;
-
- iu->srp.rsp.flags &= ~SRP_RSP_FLAG_RSPVALID;
-
- iu->srp.rsp.resp_data_len = 0;
- iu->srp.rsp.status = status;
- if (status) {
- uint8_t *sense = iu->srp.rsp.data;
-
- if (sc) {
- iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
- iu->srp.rsp.sense_data_len = SCSI_SENSE_BUFFERSIZE;
- memcpy(sense, sc->sense_buffer, SCSI_SENSE_BUFFERSIZE);
- } else {
- iu->srp.rsp.status = SAM_STAT_CHECK_CONDITION;
- iu->srp.rsp.flags |= SRP_RSP_FLAG_SNSVALID;
- iu->srp.rsp.sense_data_len = SRP_RSP_SENSE_DATA_LEN;
-
- /* Valid bit and 'current errors' */
- sense[0] = (0x1 << 7 | 0x70);
- /* Sense key */
- sense[2] = status;
- /* Additional sense length */
- sense[7] = 0xa; /* 10 bytes */
- /* Additional sense code */
- sense[12] = asc;
- }
- }
-
- send_iu(iue, sizeof(iu->srp.rsp) + SRP_RSP_SENSE_DATA_LEN,
- VIOSRP_SRP_FORMAT);
-
- return 0;
-}
-
-static void handle_cmd_queue(struct srp_target *target)
-{
- struct Scsi_Host *shost = target->shost;
- struct srp_rport *rport = target_to_port(target)->rport;
- struct iu_entry *iue;
- struct srp_cmd *cmd;
- unsigned long flags;
- int err;
-
-retry:
- spin_lock_irqsave(&target->lock, flags);
-
- list_for_each_entry(iue, &target->cmd_queue, ilist) {
- if (!test_and_set_bit(V_FLYING, &iue->flags)) {
- spin_unlock_irqrestore(&target->lock, flags);
- cmd = iue->sbuf->buf;
- err = srp_cmd_queue(shost, cmd, iue,
- (unsigned long)rport, 0);
- if (err) {
- eprintk("cannot queue cmd %p %d\n", cmd, err);
- srp_iu_put(iue);
- }
- goto retry;
- }
- }
-
- spin_unlock_irqrestore(&target->lock, flags);
-}
-
-static int ibmvstgt_rdma(struct scsi_cmnd *sc, struct scatterlist *sg, int nsg,
- struct srp_direct_buf *md, int nmd,
- enum dma_data_direction dir, unsigned int rest)
-{
- struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
- struct srp_target *target = iue->target;
- struct vio_port *vport = target_to_port(target);
- dma_addr_t token;
- long err;
- unsigned int done = 0;
- int i, sidx, soff;
-
- sidx = soff = 0;
- token = sg_dma_address(sg + sidx);
-
- for (i = 0; i < nmd && rest; i++) {
- unsigned int mdone, mlen;
-
- mlen = min(rest, md[i].len);
- for (mdone = 0; mlen;) {
- int slen = min(sg_dma_len(sg + sidx) - soff, mlen);
-
- if (dir == DMA_TO_DEVICE)
- err = h_copy_rdma(slen,
- vport->riobn,
- md[i].va + mdone,
- vport->liobn,
- token + soff);
- else
- err = h_copy_rdma(slen,
- vport->liobn,
- token + soff,
- vport->riobn,
- md[i].va + mdone);
-
- if (err != H_SUCCESS) {
- eprintk("rdma error %d %d %ld\n", dir, slen, err);
- return -EIO;
- }
-
- mlen -= slen;
- mdone += slen;
- soff += slen;
- done += slen;
-
- if (soff == sg_dma_len(sg + sidx)) {
- sidx++;
- soff = 0;
- token = sg_dma_address(sg + sidx);
-
- if (sidx > nsg) {
- eprintk("out of sg %p %d %d\n",
- iue, sidx, nsg);
- return -EIO;
- }
- }
- };
-
- rest -= mlen;
- }
- return 0;
-}
-
-static int ibmvstgt_cmd_done(struct scsi_cmnd *sc,
- void (*done)(struct scsi_cmnd *))
-{
- unsigned long flags;
- struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
- struct srp_target *target = iue->target;
- int err = 0;
-
- dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0],
- scsi_sg_count(sc));
-
- if (scsi_sg_count(sc))
- err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1);
-
- spin_lock_irqsave(&target->lock, flags);
- list_del(&iue->ilist);
- spin_unlock_irqrestore(&target->lock, flags);
-
- if (err|| sc->result != SAM_STAT_GOOD) {
- eprintk("operation failed %p %d %x\n",
- iue, sc->result, vio_iu(iue)->srp.cmd.cdb[0]);
- send_rsp(iue, sc, HARDWARE_ERROR, 0x00);
- } else
- send_rsp(iue, sc, NO_SENSE, 0x00);
-
- done(sc);
- srp_iu_put(iue);
- return 0;
-}
-
-int send_adapter_info(struct iu_entry *iue,
- dma_addr_t remote_buffer, uint16_t length)
-{
- struct srp_target *target = iue->target;
- struct vio_port *vport = target_to_port(target);
- struct Scsi_Host *shost = target->shost;
- dma_addr_t data_token;
- struct mad_adapter_info_data *info;
- int err;
-
- info = dma_alloc_coherent(target->dev, sizeof(*info), &data_token,
- GFP_KERNEL);
- if (!info) {
- eprintk("bad dma_alloc_coherent %p\n", target);
- return 1;
- }
-
- /* Get remote info */
- err = h_copy_rdma(sizeof(*info), vport->riobn, remote_buffer,
- vport->liobn, data_token);
- if (err == H_SUCCESS) {
- dprintk("Client connect: %s (%d)\n",
- info->partition_name, info->partition_number);
- }
-
- memset(info, 0, sizeof(*info));
-
- strcpy(info->srp_version, "16.a");
- strncpy(info->partition_name, partition_name,
- sizeof(info->partition_name));
- info->partition_number = partition_number;
- info->mad_version = 1;
- info->os_type = 2;
- info->port_max_txu[0] = shost->hostt->max_sectors << 9;
-
- /* Send our info to remote */
- err = h_copy_rdma(sizeof(*info), vport->liobn, data_token,
- vport->riobn, remote_buffer);
-
- dma_free_coherent(target->dev, sizeof(*info), info, data_token);
-
- if (err != H_SUCCESS) {
- eprintk("Error sending adapter info %d\n", err);
- return 1;
- }
-
- return 0;
-}
-
-static void process_login(struct iu_entry *iue)
-{
- union viosrp_iu *iu = vio_iu(iue);
- struct srp_login_rsp *rsp = &iu->srp.login_rsp;
- uint64_t tag = iu->srp.rsp.tag;
- struct Scsi_Host *shost = iue->target->shost;
- struct srp_target *target = host_to_srp_target(shost);
- struct vio_port *vport = target_to_port(target);
- struct srp_rport_identifiers ids;
-
- memset(&ids, 0, sizeof(ids));
- sprintf(ids.port_id, "%x", vport->dma_dev->unit_address);
- ids.roles = SRP_RPORT_ROLE_INITIATOR;
- if (!vport->rport)
- vport->rport = srp_rport_add(shost, &ids);
-
- /* TODO handle case that requested size is wrong and
- * buffer format is wrong
- */
- memset(iu, 0, sizeof(struct srp_login_rsp));
- rsp->opcode = SRP_LOGIN_RSP;
- rsp->req_lim_delta = INITIAL_SRP_LIMIT;
- rsp->tag = tag;
- rsp->max_it_iu_len = sizeof(union srp_iu);
- rsp->max_ti_iu_len = sizeof(union srp_iu);
- /* direct and indirect */
- rsp->buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
-
- send_iu(iue, sizeof(*rsp), VIOSRP_SRP_FORMAT);
-}
-
-static inline void queue_cmd(struct iu_entry *iue)
-{
- struct srp_target *target = iue->target;
- unsigned long flags;
-
- spin_lock_irqsave(&target->lock, flags);
- list_add_tail(&iue->ilist, &target->cmd_queue);
- spin_unlock_irqrestore(&target->lock, flags);
-}
-
-static int process_tsk_mgmt(struct iu_entry *iue)
-{
- union viosrp_iu *iu = vio_iu(iue);
- int fn;
-
- dprintk("%p %u\n", iue, iu->srp.tsk_mgmt.tsk_mgmt_func);
-
- switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
- case SRP_TSK_ABORT_TASK:
- fn = ABORT_TASK;
- break;
- case SRP_TSK_ABORT_TASK_SET:
- fn = ABORT_TASK_SET;
- break;
- case SRP_TSK_CLEAR_TASK_SET:
- fn = CLEAR_TASK_SET;
- break;
- case SRP_TSK_LUN_RESET:
- fn = LOGICAL_UNIT_RESET;
- break;
- case SRP_TSK_CLEAR_ACA:
- fn = CLEAR_ACA;
- break;
- default:
- fn = 0;
- }
- if (fn)
- scsi_tgt_tsk_mgmt_request(iue->target->shost,
- (unsigned long)iue->target->shost,
- fn,
- iu->srp.tsk_mgmt.task_tag,
- (struct scsi_lun *) &iu->srp.tsk_mgmt.lun,
- iue);
- else
- send_rsp(iue, NULL, ILLEGAL_REQUEST, 0x20);
-
- return !fn;
-}
-
-static int process_mad_iu(struct iu_entry *iue)
-{
- union viosrp_iu *iu = vio_iu(iue);
- struct viosrp_adapter_info *info;
- struct viosrp_host_config *conf;
-
- switch (iu->mad.empty_iu.common.type) {
- case VIOSRP_EMPTY_IU_TYPE:
- eprintk("%s\n", "Unsupported EMPTY MAD IU");
- break;
- case VIOSRP_ERROR_LOG_TYPE:
- eprintk("%s\n", "Unsupported ERROR LOG MAD IU");
- iu->mad.error_log.common.status = 1;
- send_iu(iue, sizeof(iu->mad.error_log), VIOSRP_MAD_FORMAT);
- break;
- case VIOSRP_ADAPTER_INFO_TYPE:
- info = &iu->mad.adapter_info;
- info->common.status = send_adapter_info(iue, info->buffer,
- info->common.length);
- send_iu(iue, sizeof(*info), VIOSRP_MAD_FORMAT);
- break;
- case VIOSRP_HOST_CONFIG_TYPE:
- conf = &iu->mad.host_config;
- conf->common.status = 1;
- send_iu(iue, sizeof(*conf), VIOSRP_MAD_FORMAT);
- break;
- default:
- eprintk("Unknown type %u\n", iu->srp.rsp.opcode);
- }
-
- return 1;
-}
-
-static int process_srp_iu(struct iu_entry *iue)
-{
- union viosrp_iu *iu = vio_iu(iue);
- int done = 1;
- u8 opcode = iu->srp.rsp.opcode;
-
- switch (opcode) {
- case SRP_LOGIN_REQ:
- process_login(iue);
- break;
- case SRP_TSK_MGMT:
- done = process_tsk_mgmt(iue);
- break;
- case SRP_CMD:
- queue_cmd(iue);
- done = 0;
- break;
- case SRP_LOGIN_RSP:
- case SRP_I_LOGOUT:
- case SRP_T_LOGOUT:
- case SRP_RSP:
- case SRP_CRED_REQ:
- case SRP_CRED_RSP:
- case SRP_AER_REQ:
- case SRP_AER_RSP:
- eprintk("Unsupported type %u\n", opcode);
- break;
- default:
- eprintk("Unknown type %u\n", opcode);
- }
-
- return done;
-}
-
-static void process_iu(struct viosrp_crq *crq, struct srp_target *target)
-{
- struct vio_port *vport = target_to_port(target);
- struct iu_entry *iue;
- long err;
- int done = 1;
-
- iue = srp_iu_get(target);
- if (!iue) {
- eprintk("Error getting IU from pool, %p\n", target);
- return;
- }
-
- iue->remote_token = crq->IU_data_ptr;
-
- err = h_copy_rdma(crq->IU_length, vport->riobn,
- iue->remote_token, vport->liobn, iue->sbuf->dma);
-
- if (err != H_SUCCESS) {
- eprintk("%ld transferring data error %p\n", err, iue);
- goto out;
- }
-
- if (crq->format == VIOSRP_MAD_FORMAT)
- done = process_mad_iu(iue);
- else
- done = process_srp_iu(iue);
-out:
- if (done)
- srp_iu_put(iue);
-}
-
-static irqreturn_t ibmvstgt_interrupt(int dummy, void *data)
-{
- struct srp_target *target = data;
- struct vio_port *vport = target_to_port(target);
-
- vio_disable_interrupts(vport->dma_dev);
- queue_work(vtgtd, &vport->crq_work);
-
- return IRQ_HANDLED;
-}
-
-static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
-{
- int err;
- struct vio_port *vport = target_to_port(target);
-
- queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);
- if (!queue->msgs)
- goto malloc_failed;
- queue->size = PAGE_SIZE / sizeof(*queue->msgs);
-
- queue->msg_token = dma_map_single(target->dev, queue->msgs,
- queue->size * sizeof(*queue->msgs),
- DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(target->dev, queue->msg_token))
- goto map_failed;
-
- err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
- PAGE_SIZE);
-
- /* If the adapter was left active for some reason (like kexec)
- * try freeing and re-registering
- */
- if (err == H_RESOURCE) {
- do {
- err = h_free_crq(vport->dma_dev->unit_address);
- } while (err == H_BUSY || H_IS_LONG_BUSY(err));
-
- err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
- PAGE_SIZE);
- }
-
- if (err != H_SUCCESS && err != 2) {
- eprintk("Error 0x%x opening virtual adapter\n", err);
- goto reg_crq_failed;
- }
-
- err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt,
- 0, "ibmvstgt", target);
- if (err)
- goto req_irq_failed;
-
- vio_enable_interrupts(vport->dma_dev);
-
- h_send_crq(vport->dma_dev->unit_address, 0xC001000000000000, 0);
-
- queue->cur = 0;
- spin_lock_init(&queue->lock);
-
- return 0;
-
-req_irq_failed:
- do {
- err = h_free_crq(vport->dma_dev->unit_address);
- } while (err == H_BUSY || H_IS_LONG_BUSY(err));
-
-reg_crq_failed:
- dma_unmap_single(target->dev, queue->msg_token,
- queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-map_failed:
- free_page((unsigned long) queue->msgs);
-
-malloc_failed:
- return -ENOMEM;
-}
-
-static void crq_queue_destroy(struct srp_target *target)
-{
- struct vio_port *vport = target_to_port(target);
- struct crq_queue *queue = &vport->crq_queue;
- int err;
-
- free_irq(vport->dma_dev->irq, target);
- do {
- err = h_free_crq(vport->dma_dev->unit_address);
- } while (err == H_BUSY || H_IS_LONG_BUSY(err));
-
- dma_unmap_single(target->dev, queue->msg_token,
- queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
-
- free_page((unsigned long) queue->msgs);
-}
-
-static void process_crq(struct viosrp_crq *crq, struct srp_target *target)
-{
- struct vio_port *vport = target_to_port(target);
- dprintk("%x %x\n", crq->valid, crq->format);
-
- switch (crq->valid) {
- case 0xC0:
- /* initialization */
- switch (crq->format) {
- case 0x01:
- h_send_crq(vport->dma_dev->unit_address,
- 0xC002000000000000, 0);
- break;
- case 0x02:
- break;
- default:
- eprintk("Unknown format %u\n", crq->format);
- }
- break;
- case 0xFF:
- /* transport event */
- break;
- case 0x80:
- /* real payload */
- switch (crq->format) {
- case VIOSRP_SRP_FORMAT:
- case VIOSRP_MAD_FORMAT:
- process_iu(crq, target);
- break;
- case VIOSRP_OS400_FORMAT:
- case VIOSRP_AIX_FORMAT:
- case VIOSRP_LINUX_FORMAT:
- case VIOSRP_INLINE_FORMAT:
- eprintk("Unsupported format %u\n", crq->format);
- break;
- default:
- eprintk("Unknown format %u\n", crq->format);
- }
- break;
- default:
- eprintk("unknown message type 0x%02x!?\n", crq->valid);
- }
-}
-
-static inline struct viosrp_crq *next_crq(struct crq_queue *queue)
-{
- struct viosrp_crq *crq;
- unsigned long flags;
-
- spin_lock_irqsave(&queue->lock, flags);
- crq = &queue->msgs[queue->cur];
- if (crq->valid & 0x80) {
- if (++queue->cur == queue->size)
- queue->cur = 0;
- } else
- crq = NULL;
- spin_unlock_irqrestore(&queue->lock, flags);
-
- return crq;
-}
-
-static void handle_crq(struct work_struct *work)
-{
- struct vio_port *vport = container_of(work, struct vio_port, crq_work);
- struct srp_target *target = vport->target;
- struct viosrp_crq *crq;
- int done = 0;
-
- while (!done) {
- while ((crq = next_crq(&vport->crq_queue)) != NULL) {
- process_crq(crq, target);
- crq->valid = 0x00;
- }
-
- vio_enable_interrupts(vport->dma_dev);
-
- crq = next_crq(&vport->crq_queue);
- if (crq) {
- vio_disable_interrupts(vport->dma_dev);
- process_crq(crq, target);
- crq->valid = 0x00;
- } else
- done = 1;
- }
-
- handle_cmd_queue(target);
-}
-
-
-static int ibmvstgt_eh_abort_handler(struct scsi_cmnd *sc)
-{
- unsigned long flags;
- struct iu_entry *iue = (struct iu_entry *) sc->SCp.ptr;
- struct srp_target *target = iue->target;
-
- dprintk("%p %p %x\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0]);
-
- spin_lock_irqsave(&target->lock, flags);
- list_del(&iue->ilist);
- spin_unlock_irqrestore(&target->lock, flags);
-
- srp_iu_put(iue);
-
- return 0;
-}
-
-static int ibmvstgt_tsk_mgmt_response(struct Scsi_Host *shost,
- u64 itn_id, u64 mid, int result)
-{
- struct iu_entry *iue = (struct iu_entry *) ((void *) mid);
- union viosrp_iu *iu = vio_iu(iue);
- unsigned char status, asc;
-
- eprintk("%p %d\n", iue, result);
- status = NO_SENSE;
- asc = 0;
-
- switch (iu->srp.tsk_mgmt.tsk_mgmt_func) {
- case SRP_TSK_ABORT_TASK:
- asc = 0x14;
- if (result)
- status = ABORTED_COMMAND;
- break;
- default:
- break;
- }
-
- send_rsp(iue, NULL, status, asc);
- srp_iu_put(iue);
-
- return 0;
-}
-
-static int ibmvstgt_it_nexus_response(struct Scsi_Host *shost, u64 itn_id,
- int result)
-{
- struct srp_target *target = host_to_srp_target(shost);
- struct vio_port *vport = target_to_port(target);
-
- if (result) {
- eprintk("%p %d\n", shost, result);
- srp_rport_del(vport->rport);
- vport->rport = NULL;
- }
- return 0;
-}
-
-static ssize_t system_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
-}
-
-static ssize_t partition_number_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
-}
-
-static ssize_t unit_address_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct srp_target *target = host_to_srp_target(shost);
- struct vio_port *vport = target_to_port(target);
- return snprintf(buf, PAGE_SIZE, "%x\n", vport->dma_dev->unit_address);
-}
-
-static DEVICE_ATTR(system_id, S_IRUGO, system_id_show, NULL);
-static DEVICE_ATTR(partition_number, S_IRUGO, partition_number_show, NULL);
-static DEVICE_ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
-
-static struct device_attribute *ibmvstgt_attrs[] = {
- &dev_attr_system_id,
- &dev_attr_partition_number,
- &dev_attr_unit_address,
- NULL,
-};
-
-static struct scsi_host_template ibmvstgt_sht = {
- .name = TGT_NAME,
- .module = THIS_MODULE,
- .can_queue = INITIAL_SRP_LIMIT,
- .sg_tablesize = SG_ALL,
- .use_clustering = DISABLE_CLUSTERING,
- .max_sectors = DEFAULT_MAX_SECTORS,
- .transfer_response = ibmvstgt_cmd_done,
- .eh_abort_handler = ibmvstgt_eh_abort_handler,
- .shost_attrs = ibmvstgt_attrs,
- .proc_name = TGT_NAME,
- .supported_mode = MODE_TARGET,
-};
-
-static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id)
-{
- struct Scsi_Host *shost;
- struct srp_target *target;
- struct vio_port *vport;
- unsigned int *dma, dma_size;
- int err = -ENOMEM;
-
- vport = kzalloc(sizeof(struct vio_port), GFP_KERNEL);
- if (!vport)
- return err;
- shost = scsi_host_alloc(&ibmvstgt_sht, sizeof(struct srp_target));
- if (!shost)
- goto free_vport;
- shost->transportt = ibmvstgt_transport_template;
-
- target = host_to_srp_target(shost);
- target->shost = shost;
- vport->dma_dev = dev;
- target->ldata = vport;
- vport->target = target;
- err = srp_target_alloc(target, &dev->dev, INITIAL_SRP_LIMIT,
- SRP_MAX_IU_LEN);
- if (err)
- goto put_host;
-
- dma = (unsigned int *) vio_get_attribute(dev, "ibm,my-dma-window",
- &dma_size);
- if (!dma || dma_size != 40) {
- eprintk("Couldn't get window property %d\n", dma_size);
- err = -EIO;
- goto free_srp_target;
- }
- vport->liobn = dma[0];
- vport->riobn = dma[5];
-
- INIT_WORK(&vport->crq_work, handle_crq);
-
- err = scsi_add_host(shost, target->dev);
- if (err)
- goto free_srp_target;
-
- err = scsi_tgt_alloc_queue(shost);
- if (err)
- goto remove_host;
-
- err = crq_queue_create(&vport->crq_queue, target);
- if (err)
- goto free_queue;
-
- return 0;
-free_queue:
- scsi_tgt_free_queue(shost);
-remove_host:
- scsi_remove_host(shost);
-free_srp_target:
- srp_target_free(target);
-put_host:
- scsi_host_put(shost);
-free_vport:
- kfree(vport);
- return err;
-}
-
-static int ibmvstgt_remove(struct vio_dev *dev)
-{
- struct srp_target *target = dev_get_drvdata(&dev->dev);
- struct Scsi_Host *shost = target->shost;
- struct vio_port *vport = target->ldata;
-
- crq_queue_destroy(target);
- srp_remove_host(shost);
- scsi_remove_host(shost);
- scsi_tgt_free_queue(shost);
- srp_target_free(target);
- kfree(vport);
- scsi_host_put(shost);
- return 0;
-}
-
-static struct vio_device_id ibmvstgt_device_table[] = {
- {"v-scsi-host", "IBM,v-scsi-host"},
- {"",""}
-};
-
-MODULE_DEVICE_TABLE(vio, ibmvstgt_device_table);
-
-static struct vio_driver ibmvstgt_driver = {
- .id_table = ibmvstgt_device_table,
- .probe = ibmvstgt_probe,
- .remove = ibmvstgt_remove,
- .name = "ibmvscsis",
-};
-
-static int get_system_info(void)
-{
- struct device_node *rootdn;
- const char *id, *model, *name;
- const unsigned int *num;
-
- rootdn = of_find_node_by_path("/");
- if (!rootdn)
- return -ENOENT;
-
- model = of_get_property(rootdn, "model", NULL);
- id = of_get_property(rootdn, "system-id", NULL);
- if (model && id)
- snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
-
- name = of_get_property(rootdn, "ibm,partition-name", NULL);
- if (name)
- strncpy(partition_name, name, sizeof(partition_name));
-
- num = of_get_property(rootdn, "ibm,partition-no", NULL);
- if (num)
- partition_number = *num;
-
- of_node_put(rootdn);
- return 0;
-}
-
-static struct srp_function_template ibmvstgt_transport_functions = {
- .tsk_mgmt_response = ibmvstgt_tsk_mgmt_response,
- .it_nexus_response = ibmvstgt_it_nexus_response,
-};
-
-static int __init ibmvstgt_init(void)
-{
- int err = -ENOMEM;
-
- printk("IBM eServer i/pSeries Virtual SCSI Target Driver\n");
-
- ibmvstgt_transport_template =
- srp_attach_transport(&ibmvstgt_transport_functions);
- if (!ibmvstgt_transport_template)
- return err;
-
- vtgtd = create_workqueue("ibmvtgtd");
- if (!vtgtd)
- goto release_transport;
-
- err = get_system_info();
- if (err)
- goto destroy_wq;
-
- err = vio_register_driver(&ibmvstgt_driver);
- if (err)
- goto destroy_wq;
-
- return 0;
-destroy_wq:
- destroy_workqueue(vtgtd);
-release_transport:
- srp_release_transport(ibmvstgt_transport_template);
- return err;
-}
-
-static void __exit ibmvstgt_exit(void)
-{
- printk("Unregister IBM virtual SCSI driver\n");
-
- destroy_workqueue(vtgtd);
- vio_unregister_driver(&ibmvstgt_driver);
- srp_release_transport(ibmvstgt_transport_template);
-}
-
-MODULE_DESCRIPTION("IBM Virtual SCSI Target");
-MODULE_AUTHOR("Santiago Leon");
-MODULE_LICENSE("GPL");
-
-module_init(ibmvstgt_init);
-module_exit(ibmvstgt_exit);
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
index b1c4d831137d..ddf0694d87f0 100644
--- a/drivers/scsi/in2000.c
+++ b/drivers/scsi/in2000.c
@@ -2251,14 +2251,14 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "\nconnected: ");
if (hd->connected) {
cmd = (Scsi_Cmnd *) hd->connected;
- seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
}
}
if (hd->proc & PR_INPUTQ) {
seq_printf(m, "\ninput_Q: ");
cmd = (Scsi_Cmnd *) hd->input_Q;
while (cmd) {
- seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
}
@@ -2266,7 +2266,7 @@ static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "\ndisconnected_Q:");
cmd = (Scsi_Cmnd *) hd->disconnected_Q;
while (cmd) {
- seq_printf(m, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
+ seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
}
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 695b34e9154e..4198e45ea941 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -356,7 +356,7 @@ static int isci_setup_interrupts(struct pci_dev *pdev)
for (i = 0; i < num_msix; i++)
pci_info->msix_entries[i].entry = i;
- err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
+ err = pci_enable_msix_exact(pdev, pci_info->msix_entries, num_msix);
if (err)
goto intx;
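
pci_enable_msix_exact() either allocates exactly the requested number of vectors or fails with a negative errno, so the caller no longer has to interpret a positive return as a partial allocation. A minimal sketch of the calling convention, using hypothetical driver names rather than the isci code:

#include <linux/pci.h>

static int example_setup_msix(struct pci_dev *pdev,
			      struct msix_entry *entries, int nvec)
{
	int i, err;

	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	/* Returns 0 on success or a negative errno, never a partial count. */
	err = pci_enable_msix_exact(pdev, entries, nvec);
	if (err)
		return err;	/* caller typically falls back to INTx */

	return 0;
}
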
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 3d1bc67bac9d..f9f3a1224dfa 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -260,7 +260,7 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_tm *tmf = &conn->tmhdr;
- unsigned int hdr_lun;
+ u64 hdr_lun;
if (conn->tmf_state == TMF_INITIAL)
return 0;
@@ -1859,8 +1859,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
* Fail commands. session lock held and recv side suspended and xmit
* thread flushed
*/
-static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
- int error)
+static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
{
struct iscsi_task *task;
int i;
@@ -2279,7 +2278,8 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
cls_session = starget_to_session(scsi_target(sc->device));
session = cls_session->dd_data;
- ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
+ ISCSI_DBG_EH(session, "LU Reset [sc %p lun %llu]\n", sc,
+ sc->device->lun);
mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->frwd_lock);
@@ -2971,7 +2971,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
*/
for (;;) {
spin_lock_irqsave(session->host->host_lock, flags);
- if (!session->host->host_busy) { /* OK for ERL == 0 */
+ if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
spin_unlock_irqrestore(session->host->host_lock, flags);
break;
}
@@ -2979,7 +2979,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
msleep_interruptible(500);
iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
"host_busy %d host_failed %d\n",
- session->host->host_busy,
+ atomic_read(&session->host->host_busy),
session->host->host_failed);
/*
* force eh_abort() to unblock
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 25d0f127424d..24e477d2ea70 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -404,7 +404,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
int_to_scsilun(cmd->device->lun, &lun);
- SAS_DPRINTK("eh: device %llx LUN %x has the task\n",
+ SAS_DPRINTK("eh: device %llx LUN %llx has the task\n",
SAS_ADDR(dev->sas_addr),
cmd->device->lun);
@@ -490,7 +490,8 @@ static void sas_wait_eh(struct domain_device *dev)
}
EXPORT_SYMBOL(sas_wait_eh);
-static int sas_queue_reset(struct domain_device *dev, int reset_type, int lun, int wait)
+static int sas_queue_reset(struct domain_device *dev, int reset_type,
+ u64 lun, int wait)
{
struct sas_ha_struct *ha = dev->port->ha;
int scheduled = 0, tries = 100;
@@ -689,7 +690,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
reset:
tmf_resp = sas_recover_lu(task->dev, cmd);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
- SAS_DPRINTK("dev %016llx LU %x is "
+ SAS_DPRINTK("dev %016llx LU %llx is "
"recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
@@ -742,7 +743,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
* of effort could recover from errors. Quite
* possibly the HA just disappeared.
*/
- SAS_DPRINTK("error from device %llx, LUN %x "
+ SAS_DPRINTK("error from device %llx, LUN %llx "
"couldn't be recovered in any way\n",
SAS_ADDR(task->dev->sas_addr),
cmd->device->lun);
@@ -812,7 +813,7 @@ retry:
spin_unlock_irq(shost->host_lock);
SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
- __func__, shost->host_busy, shost->host_failed);
+ __func__, atomic_read(&shost->host_busy), shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism),
@@ -857,7 +858,8 @@ out:
goto retry;
SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
- __func__, shost->host_busy, shost->host_failed, tries);
+ __func__, atomic_read(&shost->host_busy),
+ shost->host_failed, tries);
}
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
@@ -941,7 +943,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG);
scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
} else {
- SAS_DPRINTK("device %llx, LUN %x doesn't support "
+ SAS_DPRINTK("device %llx, LUN %llx doesn't support "
"TCQ\n", SAS_ADDR(dev->sas_addr),
scsi_dev->lun);
scsi_dev->tagged_supported = 0;
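
The format-string changes in the hunks above track the SCSI core's switch of scsi_device->lun from unsigned int to u64, which requires %llu or %llx wherever a LUN is printed. A minimal sketch of the resulting pattern, with a hypothetical helper name:

#include <linux/kernel.h>
#include <scsi/scsi_device.h>

/* Hypothetical debug helper: sdev->lun is a u64, so use %llu (or %llx). */
static void example_report_lun(struct scsi_device *sdev)
{
	sdev_printk(KERN_INFO, sdev, "example: bound at LUN %llu (0x%llx)\n",
		    sdev->lun, sdev->lun);
}
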
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
deleted file mode 100644
index 0707ecdbaa32..000000000000
--- a/drivers/scsi/libsrp.c
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * SCSI RDMA Protocol lib functions
- *
- * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_tgt.h>
-#include <scsi/srp.h>
-#include <scsi/libsrp.h>
-
-enum srp_task_attributes {
- SRP_SIMPLE_TASK = 0,
- SRP_HEAD_TASK = 1,
- SRP_ORDERED_TASK = 2,
- SRP_ACA_TASK = 4
-};
-
-/* tmp - will replace with SCSI logging stuff */
-#define eprintk(fmt, args...) \
-do { \
- printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
-} while (0)
-/* #define dprintk eprintk */
-#define dprintk(fmt, args...)
-
-static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
- struct srp_buf **ring)
-{
- int i;
- struct iu_entry *iue;
-
- q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
- if (!q->pool)
- return -ENOMEM;
- q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
- if (!q->items)
- goto free_pool;
-
- spin_lock_init(&q->lock);
- kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *));
-
- for (i = 0, iue = q->items; i < max; i++) {
- kfifo_in(&q->queue, (void *) &iue, sizeof(void *));
- iue->sbuf = ring[i];
- iue++;
- }
- return 0;
-
- kfree(q->items);
-free_pool:
- kfree(q->pool);
- return -ENOMEM;
-}
-
-static void srp_iu_pool_free(struct srp_queue *q)
-{
- kfree(q->items);
- kfree(q->pool);
-}
-
-static struct srp_buf **srp_ring_alloc(struct device *dev,
- size_t max, size_t size)
-{
- int i;
- struct srp_buf **ring;
-
- ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
- if (!ring)
- return NULL;
-
- for (i = 0; i < max; i++) {
- ring[i] = kzalloc(sizeof(struct srp_buf), GFP_KERNEL);
- if (!ring[i])
- goto out;
- ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
- GFP_KERNEL);
- if (!ring[i]->buf)
- goto out;
- }
- return ring;
-
-out:
- for (i = 0; i < max && ring[i]; i++) {
- if (ring[i]->buf)
- dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
- kfree(ring[i]);
- }
- kfree(ring);
-
- return NULL;
-}
-
-static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max,
- size_t size)
-{
- int i;
-
- for (i = 0; i < max; i++) {
- dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
- kfree(ring[i]);
- }
- kfree(ring);
-}
-
-int srp_target_alloc(struct srp_target *target, struct device *dev,
- size_t nr, size_t iu_size)
-{
- int err;
-
- spin_lock_init(&target->lock);
- INIT_LIST_HEAD(&target->cmd_queue);
-
- target->dev = dev;
- dev_set_drvdata(target->dev, target);
-
- target->srp_iu_size = iu_size;
- target->rx_ring_size = nr;
- target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
- if (!target->rx_ring)
- return -ENOMEM;
- err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
- if (err)
- goto free_ring;
-
- return 0;
-
-free_ring:
- srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(srp_target_alloc);
-
-void srp_target_free(struct srp_target *target)
-{
- srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
- target->srp_iu_size);
- srp_iu_pool_free(&target->iu_queue);
-}
-EXPORT_SYMBOL_GPL(srp_target_free);
-
-struct iu_entry *srp_iu_get(struct srp_target *target)
-{
- struct iu_entry *iue = NULL;
-
- if (kfifo_out_locked(&target->iu_queue.queue, (void *) &iue,
- sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) {
- WARN_ONCE(1, "unexpected fifo state");
- return NULL;
- }
- if (!iue)
- return iue;
- iue->target = target;
- INIT_LIST_HEAD(&iue->ilist);
- iue->flags = 0;
- return iue;
-}
-EXPORT_SYMBOL_GPL(srp_iu_get);
-
-void srp_iu_put(struct iu_entry *iue)
-{
- kfifo_in_locked(&iue->target->iu_queue.queue, (void *) &iue,
- sizeof(void *), &iue->target->iu_queue.lock);
-}
-EXPORT_SYMBOL_GPL(srp_iu_put);
-
-static int srp_direct_data(struct scsi_cmnd *sc, struct srp_direct_buf *md,
- enum dma_data_direction dir, srp_rdma_t rdma_io,
- int dma_map, int ext_desc)
-{
- struct iu_entry *iue = NULL;
- struct scatterlist *sg = NULL;
- int err, nsg = 0, len;
-
- if (dma_map) {
- iue = (struct iu_entry *) sc->SCp.ptr;
- sg = scsi_sglist(sc);
-
- dprintk("%p %u %u %d\n", iue, scsi_bufflen(sc),
- md->len, scsi_sg_count(sc));
-
- nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
- DMA_BIDIRECTIONAL);
- if (!nsg) {
- printk("fail to map %p %d\n", iue, scsi_sg_count(sc));
- return 0;
- }
- len = min(scsi_bufflen(sc), md->len);
- } else
- len = md->len;
-
- err = rdma_io(sc, sg, nsg, md, 1, dir, len);
-
- if (dma_map)
- dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
-
- return err;
-}
-
-static int srp_indirect_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
- struct srp_indirect_buf *id,
- enum dma_data_direction dir, srp_rdma_t rdma_io,
- int dma_map, int ext_desc)
-{
- struct iu_entry *iue = NULL;
- struct srp_direct_buf *md = NULL;
- struct scatterlist dummy, *sg = NULL;
- dma_addr_t token = 0;
- int err = 0;
- int nmd, nsg = 0, len;
-
- if (dma_map || ext_desc) {
- iue = (struct iu_entry *) sc->SCp.ptr;
- sg = scsi_sglist(sc);
-
- dprintk("%p %u %u %d %d\n",
- iue, scsi_bufflen(sc), id->len,
- cmd->data_in_desc_cnt, cmd->data_out_desc_cnt);
- }
-
- nmd = id->table_desc.len / sizeof(struct srp_direct_buf);
-
- if ((dir == DMA_FROM_DEVICE && nmd == cmd->data_in_desc_cnt) ||
- (dir == DMA_TO_DEVICE && nmd == cmd->data_out_desc_cnt)) {
- md = &id->desc_list[0];
- goto rdma;
- }
-
- if (ext_desc && dma_map) {
- md = dma_alloc_coherent(iue->target->dev, id->table_desc.len,
- &token, GFP_KERNEL);
- if (!md) {
- eprintk("Can't get dma memory %u\n", id->table_desc.len);
- return -ENOMEM;
- }
-
- sg_init_one(&dummy, md, id->table_desc.len);
- sg_dma_address(&dummy) = token;
- sg_dma_len(&dummy) = id->table_desc.len;
- err = rdma_io(sc, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
- id->table_desc.len);
- if (err) {
- eprintk("Error copying indirect table %d\n", err);
- goto free_mem;
- }
- } else {
- eprintk("This command uses external indirect buffer\n");
- return -EINVAL;
- }
-
-rdma:
- if (dma_map) {
- nsg = dma_map_sg(iue->target->dev, sg, scsi_sg_count(sc),
- DMA_BIDIRECTIONAL);
- if (!nsg) {
- eprintk("fail to map %p %d\n", iue, scsi_sg_count(sc));
- err = -EIO;
- goto free_mem;
- }
- len = min(scsi_bufflen(sc), id->len);
- } else
- len = id->len;
-
- err = rdma_io(sc, sg, nsg, md, nmd, dir, len);
-
- if (dma_map)
- dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);
-
-free_mem:
- if (token && dma_map)
- dma_free_coherent(iue->target->dev, id->table_desc.len, md, token);
-
- return err;
-}
-
-static int data_out_desc_size(struct srp_cmd *cmd)
-{
- int size = 0;
- u8 fmt = cmd->buf_fmt >> 4;
-
- switch (fmt) {
- case SRP_NO_DATA_DESC:
- break;
- case SRP_DATA_DESC_DIRECT:
- size = sizeof(struct srp_direct_buf);
- break;
- case SRP_DATA_DESC_INDIRECT:
- size = sizeof(struct srp_indirect_buf) +
- sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
- break;
- default:
- eprintk("client error. Invalid data_out_format %x\n", fmt);
- break;
- }
- return size;
-}
-
-/*
- * TODO: this can be called multiple times for a single command if it
- * has very long data.
- */
-int srp_transfer_data(struct scsi_cmnd *sc, struct srp_cmd *cmd,
- srp_rdma_t rdma_io, int dma_map, int ext_desc)
-{
- struct srp_direct_buf *md;
- struct srp_indirect_buf *id;
- enum dma_data_direction dir;
- int offset, err = 0;
- u8 format;
-
- offset = cmd->add_cdb_len & ~3;
-
- dir = srp_cmd_direction(cmd);
- if (dir == DMA_FROM_DEVICE)
- offset += data_out_desc_size(cmd);
-
- if (dir == DMA_TO_DEVICE)
- format = cmd->buf_fmt >> 4;
- else
- format = cmd->buf_fmt & ((1U << 4) - 1);
-
- switch (format) {
- case SRP_NO_DATA_DESC:
- break;
- case SRP_DATA_DESC_DIRECT:
- md = (struct srp_direct_buf *)
- (cmd->add_data + offset);
- err = srp_direct_data(sc, md, dir, rdma_io, dma_map, ext_desc);
- break;
- case SRP_DATA_DESC_INDIRECT:
- id = (struct srp_indirect_buf *)
- (cmd->add_data + offset);
- err = srp_indirect_data(sc, cmd, id, dir, rdma_io, dma_map,
- ext_desc);
- break;
- default:
- eprintk("Unknown format %d %x\n", dir, format);
- err = -EINVAL;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(srp_transfer_data);
-
-static int vscsis_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
-{
- struct srp_direct_buf *md;
- struct srp_indirect_buf *id;
- int len = 0, offset = cmd->add_cdb_len & ~3;
- u8 fmt;
-
- if (dir == DMA_TO_DEVICE)
- fmt = cmd->buf_fmt >> 4;
- else {
- fmt = cmd->buf_fmt & ((1U << 4) - 1);
- offset += data_out_desc_size(cmd);
- }
-
- switch (fmt) {
- case SRP_NO_DATA_DESC:
- break;
- case SRP_DATA_DESC_DIRECT:
- md = (struct srp_direct_buf *) (cmd->add_data + offset);
- len = md->len;
- break;
- case SRP_DATA_DESC_INDIRECT:
- id = (struct srp_indirect_buf *) (cmd->add_data + offset);
- len = id->len;
- break;
- default:
- eprintk("invalid data format %x\n", fmt);
- break;
- }
- return len;
-}
-
-int srp_cmd_queue(struct Scsi_Host *shost, struct srp_cmd *cmd, void *info,
- u64 itn_id, u64 addr)
-{
- enum dma_data_direction dir;
- struct scsi_cmnd *sc;
- int tag, len, err;
-
- switch (cmd->task_attr) {
- case SRP_SIMPLE_TASK:
- tag = MSG_SIMPLE_TAG;
- break;
- case SRP_ORDERED_TASK:
- tag = MSG_ORDERED_TAG;
- break;
- case SRP_HEAD_TASK:
- tag = MSG_HEAD_TAG;
- break;
- default:
- eprintk("Task attribute %d not supported\n", cmd->task_attr);
- tag = MSG_ORDERED_TAG;
- }
-
- dir = srp_cmd_direction(cmd);
- len = vscsis_data_length(cmd, dir);
-
- dprintk("%p %x %lx %d %d %d %llx\n", info, cmd->cdb[0],
- cmd->lun, dir, len, tag, (unsigned long long) cmd->tag);
-
- sc = scsi_host_get_command(shost, dir, GFP_KERNEL);
- if (!sc)
- return -ENOMEM;
-
- sc->SCp.ptr = info;
- memcpy(sc->cmnd, cmd->cdb, MAX_COMMAND_SIZE);
- sc->sdb.length = len;
- sc->sdb.table.sgl = (void *) (unsigned long) addr;
- sc->tag = tag;
- err = scsi_tgt_queue_command(sc, itn_id, (struct scsi_lun *)&cmd->lun,
- cmd->tag);
- if (err)
- scsi_host_put_command(shost, sc);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(srp_cmd_queue);
-
-MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
-MODULE_AUTHOR("FUJITA Tomonori");
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1d7a5c34ee8c..6eed9e76a166 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1998,6 +1998,14 @@ lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+#define LPFC_VPORT_ULL_ATTR_R(name, defval, minval, maxval, desc) \
+static uint64_t lpfc_##name = defval;\
+module_param(lpfc_##name, ullong, S_IRUGO);\
+MODULE_PARM_DESC(lpfc_##name, desc);\
+lpfc_vport_param_show(name)\
+lpfc_vport_param_init(name, defval, minval, maxval)\
+static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
+
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
module_param(lpfc_##name, uint, S_IRUGO);\
@@ -4596,7 +4604,7 @@ LPFC_VPORT_ATTR(discovery_threads, 32, 1, 64, "Maximum number of ELS commands "
# Value range is [0,65535]. Default value is 255.
# NOTE: The SCSI layer might probe all allowed LUN on some old targets.
*/
-LPFC_VPORT_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
+LPFC_VPORT_ULL_ATTR_R(max_luns, 255, 0, 65535, "Maximum allowed LUN ID");
/*
# lpfc_poll_tmo: .Milliseconds driver will wait between polling FCP ring.
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 06f9a5b79e66..a5769a9960ac 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -8242,7 +8242,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
if (rc) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0420 PCI enable MSI-X failed (%d)\n", rc);
- goto msi_fail_out;
+ goto vec_fail_out;
}
for (i = 0; i < LPFC_MSIX_VECTORS; i++)
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -8320,6 +8320,8 @@ irq_fail_out:
msi_fail_out:
/* Unconfigure MSI-X capability structure */
pci_disable_msix(phba->pcidev);
+
+vec_fail_out:
return rc;
}
@@ -8812,7 +8814,7 @@ enable_msix_vectors:
} else if (rc) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0484 PCI enable MSI-X failed (%d)\n", rc);
- goto msi_fail_out;
+ goto vec_fail_out;
}
/* Log MSI-X vector assignment */
@@ -8875,9 +8877,10 @@ cfg_fail_out:
&phba->sli4_hba.fcp_eq_hdl[index]);
}
-msi_fail_out:
/* Unconfigure MSI-X capability structure */
pci_disable_msix(phba->pcidev);
+
+vec_fail_out:
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2df11daad85b..7862c5540861 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -258,7 +258,7 @@ static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp,
- uint32_t lun,
+ uint64_t lun,
uint32_t old_val,
uint32_t new_val)
{
@@ -3823,7 +3823,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"2719 Invalid response length: "
- "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
+ "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
cmnd->device->id,
cmnd->device->lun, cmnd->cmnd[0],
rsplen);
@@ -3834,7 +3834,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"2757 Protocol failure detected during "
"processing of FCP I/O op: "
- "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
+ "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
cmnd->device->id,
cmnd->device->lun, cmnd->cmnd[0],
fcprsp->rspInfo3);
@@ -4045,7 +4045,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
else
logit = LOG_FCP | LOG_FCP_UNDER;
lpfc_printf_vlog(vport, KERN_WARNING, logit,
- "9030 FCP cmd x%x failed <%d/%d> "
+ "9030 FCP cmd x%x failed <%d/%lld> "
"status: x%x result: x%x "
"sid: x%x did: x%x oxid: x%x "
"Data: x%x x%x\n",
@@ -4157,7 +4157,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0710 Iodone <%d/%d> cmd %p, error "
+ "0710 Iodone <%d/%llu> cmd %p, error "
"x%x SNS x%x x%x Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
cmd->result, *lp, *(lp + 3), cmd->retries,
@@ -4390,7 +4390,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
struct lpfc_scsi_buf *lpfc_cmd,
- unsigned int lun,
+ uint64_t lun,
uint8_t task_mgmt_cmd)
{
struct lpfc_iocbq *piocbq;
@@ -4719,12 +4719,12 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
atomic_dec(&ndlp->cmd_pending);
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"3376 FCP could not issue IOCB err %x"
- "FCP cmd x%x <%d/%d> "
+ "FCP cmd x%x <%d/%llu> "
"sid: x%x did: x%x oxid: x%x "
"Data: x%x x%x x%x x%x\n",
err, cmnd->cmnd[0],
cmnd->device ? cmnd->device->id : 0xffff,
- cmnd->device ? cmnd->device->lun : 0xffff,
+ cmnd->device ? cmnd->device->lun : (u64) -1,
vport->fc_myDID, ndlp->nlp_DID,
phba->sli_rev == LPFC_SLI_REV4 ?
lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
@@ -4807,7 +4807,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
spin_unlock_irqrestore(&phba->hbalock, flags);
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
- "x%x ID %d LUN %d\n",
+ "x%x ID %d LUN %llu\n",
SUCCESS, cmnd->device->id, cmnd->device->lun);
return SUCCESS;
}
@@ -4924,7 +4924,7 @@ wait_for_cmpl:
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0748 abort handler timed out waiting "
"for abortng I/O (xri:x%x) to complete: "
- "ret %#x, ID %d, LUN %d\n",
+ "ret %#x, ID %d, LUN %llu\n",
iocb->sli4_xritag, ret,
cmnd->device->id, cmnd->device->lun);
}
@@ -4935,7 +4935,7 @@ out_unlock:
out:
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"0749 SCSI Layer I/O Abort Request Status x%x ID %d "
- "LUN %d\n", ret, cmnd->device->id,
+ "LUN %llu\n", ret, cmnd->device->id,
cmnd->device->lun);
return ret;
}
@@ -5047,7 +5047,7 @@ lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd)
**/
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
- unsigned tgt_id, unsigned int lun_id,
+ unsigned tgt_id, uint64_t lun_id,
uint8_t task_mgmt_cmd)
{
struct lpfc_hba *phba = vport->phba;
@@ -5083,7 +5083,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
- "0702 Issue %s to TGT %d LUN %d "
+ "0702 Issue %s to TGT %d LUN %llu "
"rpi x%x nlp_flag x%x Data: x%x x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
@@ -5094,7 +5094,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
if ((status != IOCB_SUCCESS) ||
(iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "
+ "0727 TMF %s to TGT %d LUN %llu failed (%d, %d) "
"iocb_flag x%x\n",
lpfc_taskmgmt_name(task_mgmt_cmd),
tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,
@@ -5238,7 +5238,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned tgt_id = cmnd->device->id;
- unsigned int lun_id = cmnd->device->lun;
+ uint64_t lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
@@ -5273,7 +5273,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
FCP_LUN_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0713 SCSI layer issued Device Reset (%d, %d) "
+ "0713 SCSI layer issued Device Reset (%d, %llu) "
"return x%x\n", tgt_id, lun_id, status);
/*
@@ -5308,7 +5308,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *pnode;
unsigned tgt_id = cmnd->device->id;
- unsigned int lun_id = cmnd->device->lun;
+ uint64_t lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
@@ -5343,7 +5343,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
FCP_TARGET_RESET);
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
- "0723 SCSI layer issued Target Reset (%d, %d) "
+ "0723 SCSI layer issued Target Reset (%d, %llu) "
"return x%x\n", tgt_id, lun_id, status);
/*
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index b7770516f4c2..ac5d94cfd52f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -1860,7 +1860,7 @@ megaraid_info(struct Scsi_Host *host)
"LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
adapter->fw_version, adapter->product_info.max_commands,
adapter->host->max_id, adapter->host->max_channel,
- adapter->host->max_lun);
+ (u32)adapter->host->max_lun);
return buffer;
}
@@ -1941,8 +1941,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n",
(aor == SCB_ABORT)? "ABORTING":"RESET",
- cmd->cmnd[0], cmd->device->channel,
- cmd->device->id, cmd->device->lun);
+ cmd->cmnd[0], cmd->device->channel,
+ cmd->device->id, (u32)cmd->device->lun);
if(list_empty(&adapter->pending_list))
return FALSE;
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
index 5ead1283a844..1d037ed52c33 100644
--- a/drivers/scsi/megaraid/mega_common.h
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -204,7 +204,7 @@ typedef struct {
#define SCP2HOSTDATA(scp) SCP2HOST(scp)->hostdata // to soft state
#define SCP2CHANNEL(scp) (scp)->device->channel // to channel
#define SCP2TARGET(scp) (scp)->device->id // to target
-#define SCP2LUN(scp) (scp)->device->lun // to LUN
+#define SCP2LUN(scp) (u32)(scp)->device->lun // to LUN
// generic macro to convert scsi command and host to controller's soft state
#define SCSIHOST2ADAP(host) (((caddr_t *)(host->hostdata))[0])
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index e2237a97cb9d..531dce419c18 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -998,8 +998,9 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
* Allocate the common 16-byte aligned memory for the handshake
* mailbox.
*/
- raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev,
- sizeof(mbox64_t), &raid_dev->una_mbox64_dma);
+ raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
+ sizeof(mbox64_t),
+ &raid_dev->una_mbox64_dma);
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
@@ -1007,7 +1008,6 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
__LINE__));
return -1;
}
- memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t));
/*
* Align the mailbox at 16-byte boundary
@@ -1026,8 +1026,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
align;
// Allocate memory for commands issued internally
- adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE,
- &adapter->ibuf_dma_h);
+ adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h);
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
@@ -1036,7 +1036,6 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
goto out_free_common_mbox;
}
- memset(adapter->ibuf, 0, MBOX_IBUF_SIZE);
// Allocate memory for our SCSI Command Blocks and their associated
// memory
@@ -2972,8 +2971,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
* Issue an ENQUIRY3 command to find out certain adapter parameters,
* e.g., max channels, max commands etc.
*/
- pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
- &pinfo_dma_h);
+ pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h);
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
@@ -2982,7 +2981,6 @@ megaraid_mbox_product_info(adapter_t *adapter)
return -1;
}
- memset(pinfo, 0, sizeof(mraid_pinfo_t));
mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
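
The three megaraid_mbox hunks above are the same mechanical conversion: pci_alloc_consistent() plus a follow-up memset() becomes a single pci_zalloc_consistent() call. A small sketch of the pattern; my_alloc()/my_free() and MY_BUF_SIZE are placeholders, not driver symbols:

    #include <linux/pci.h>

    #define MY_BUF_SIZE 4096          /* placeholder size */

    static void *buf;                 /* kernel virtual address of the buffer */
    static dma_addr_t buf_dma;        /* bus address handed to the device */

    static int my_alloc(struct pci_dev *pdev)
    {
        /* One call allocates coherent DMA memory and zeroes it, replacing
         * the old pci_alloc_consistent() + memset() pair. */
        buf = pci_zalloc_consistent(pdev, MY_BUF_SIZE, &buf_dma);
        if (!buf)
            return -ENOMEM;
        return 0;
    }

    static void my_free(struct pci_dev *pdev)
    {
        pci_free_consistent(pdev, MY_BUF_SIZE, buf, buf_dma);
    }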
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 112799b131a9..22a04e37b70a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -2038,9 +2038,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
if (initial) {
instance->hb_host_mem =
- pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_CTRL_HB_HOST_MEM),
- &instance->hb_host_mem_h);
+ pci_zalloc_consistent(instance->pdev,
+ sizeof(struct MR_CTRL_HB_HOST_MEM),
+ &instance->hb_host_mem_h);
if (!instance->hb_host_mem) {
printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
" memory for heartbeat host memory for "
@@ -2048,8 +2048,6 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
retval = -ENOMEM;
goto out;
}
- memset(instance->hb_host_mem, 0,
- sizeof(struct MR_CTRL_HB_HOST_MEM));
}
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 22600419ae9f..3ed03dfab76c 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1690,7 +1690,7 @@ NonFastPath:
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
- io_request->LUN[1] = scmd->device->lun;
+ int_to_scsilun(scmd->device->lun, (struct scsi_lun *)io_request->LUN);
}
/**
@@ -1713,7 +1713,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(instance, scp);
/* Zero out some fields so they don't get reused */
- io_request->LUN[1] = 0;
+ memset(io_request->LUN, 0x0, 8);
io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
io_request->EEDPFlags = 0;
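
Writing only LUN[1] truncated anything wider than a single-level LUN; the replacement zeroes the whole 8-byte field and lets int_to_scsilun() encode the full 64-bit value in SAM address format. A hedged sketch of the helper in isolation; struct my_io_request and my_set_lun() are stand-ins, not the megaraid_sas structures:

    #include <linux/string.h>
    #include <linux/types.h>
    #include <scsi/scsi.h>           /* struct scsi_lun, int_to_scsilun() */

    /* Hypothetical request layout with an 8-byte wire-format LUN field. */
    struct my_io_request {
        u8 LUN[8];
    };

    static void my_set_lun(struct my_io_request *io_request, u64 lun)
    {
        /* Clear the whole field so stale bytes are never reused... */
        memset(io_request->LUN, 0, sizeof(io_request->LUN));
        /* ...then encode the 64-bit LUN in SAM address format. */
        int_to_scsilun(lun, (struct scsi_lun *)io_request->LUN);
    }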
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index e8a04ae3276a..57a95e2c3442 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1230,7 +1230,7 @@ static void handle_msgin(struct mesh_state *ms)
ms->msgphase = msg_out;
} else if (code != cmd->device->lun + IDENTIFY_BASE) {
printk(KERN_WARNING "mesh: lun mismatch "
- "(%d != %d) on reselection from "
+ "(%d != %llu) on reselection from "
"target %d\n", code - IDENTIFY_BASE,
cmd->device->lun, ms->conn_tgt);
}
@@ -1915,14 +1915,12 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
- dma_cmd_space = pci_alloc_consistent(macio_get_pci_dev(mdev),
- ms->dma_cmd_size,
- &dma_cmd_bus);
+ dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev),
+ ms->dma_cmd_size, &dma_cmd_bus);
if (dma_cmd_space == NULL) {
printk(KERN_ERR "mesh: can't allocate DMA table\n");
goto out_unmap;
}
- memset(dma_cmd_space, 0, ms->dma_cmd_size);
ms->dma_cmds = (struct dbdma_cmd *) DBDMA_ALIGN(dma_cmd_space);
ms->dma_cmd_space = dma_cmd_space;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 8b88118e20e6..2f262be890c5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -277,7 +277,7 @@ mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
ioc->fault_reset_work_q = NULL;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (wq) {
- if (!cancel_delayed_work(&ioc->fault_reset_work))
+ if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
flush_workqueue(wq);
destroy_workqueue(wq);
}
@@ -1332,53 +1332,35 @@ _base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
static void
_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
{
- struct adapter_reply_queue *reply_q;
- int cpu_id;
- int cpu_grouping, loop, grouping, grouping_mod;
+ unsigned int cpu, nr_cpus, nr_msix, index = 0;
if (!_base_is_controller_msix_enabled(ioc))
return;
memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
- /* when there are more cpus than available msix vectors,
- * then group cpus togeather on same irq
- */
- if (ioc->cpu_count > ioc->msix_vector_count) {
- grouping = ioc->cpu_count / ioc->msix_vector_count;
- grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
- if (grouping < 2 || (grouping == 2 && !grouping_mod))
- cpu_grouping = 2;
- else if (grouping < 4 || (grouping == 4 && !grouping_mod))
- cpu_grouping = 4;
- else if (grouping < 8 || (grouping == 8 && !grouping_mod))
- cpu_grouping = 8;
- else
- cpu_grouping = 16;
- } else
- cpu_grouping = 0;
-
- loop = 0;
- reply_q = list_entry(ioc->reply_queue_list.next,
- struct adapter_reply_queue, list);
- for_each_online_cpu(cpu_id) {
- if (!cpu_grouping) {
- ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
- reply_q = list_entry(reply_q->list.next,
- struct adapter_reply_queue, list);
- } else {
- if (loop < cpu_grouping) {
- ioc->cpu_msix_table[cpu_id] =
- reply_q->msix_index;
- loop++;
- } else {
- reply_q = list_entry(reply_q->list.next,
- struct adapter_reply_queue, list);
- ioc->cpu_msix_table[cpu_id] =
- reply_q->msix_index;
- loop = 1;
- }
+
+ nr_cpus = num_online_cpus();
+ nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
+ ioc->facts.MaxMSIxVectors);
+ if (!nr_msix)
+ return;
+
+ cpu = cpumask_first(cpu_online_mask);
+
+ do {
+ unsigned int i, group = nr_cpus / nr_msix;
+
+ if (index < nr_cpus % nr_msix)
+ group++;
+
+ for (i = 0 ; i < group ; i++) {
+ ioc->cpu_msix_table[cpu] = index;
+ cpu = cpumask_next(cpu, cpu_online_mask);
}
- }
+
+ index++;
+
+ } while (cpu < nr_cpus);
}
/**
@@ -4295,12 +4277,13 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
goto out_free_resources;
if (ioc->is_warpdrive) {
- ioc->reply_post_host_index[0] =
- (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
+ ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
+ &ioc->chip->ReplyPostHostIndex;
for (i = 1; i < ioc->cpu_msix_table_sz; i++)
- ioc->reply_post_host_index[i] = (resource_size_t *)
- ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
+ ioc->reply_post_host_index[i] =
+ (resource_size_t __iomem *)
+ ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
* 4)));
}
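
The rewritten _base_assign_reply_queues() above drops the power-of-two cpu_grouping heuristic and simply walks the online CPUs, handing nr_cpus/nr_msix of them to each vector and one extra CPU to the first nr_cpus%nr_msix vectors. A standalone model of that distribution, using a plain array instead of cpu_online_mask so it can be read on its own:

    /* Simplified model of the new assignment: cpu_to_msix[c] = vector index. */
    static void assign_reply_queues(unsigned int *cpu_to_msix,
                                    unsigned int nr_cpus,
                                    unsigned int nr_msix)
    {
        unsigned int cpu = 0, index = 0;

        if (!nr_msix)
            return;

        while (cpu < nr_cpus) {
            /* Base share, plus one extra CPU for the first remainder vectors. */
            unsigned int i, group = nr_cpus / nr_msix;

            if (index < nr_cpus % nr_msix)
                group++;

            for (i = 0; i < group && cpu < nr_cpus; i++)
                cpu_to_msix[cpu++] = index;

            index++;
        }
    }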
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index fd3b998c75b1..0ac5815a7f91 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -837,7 +837,7 @@ struct MPT2SAS_ADAPTER {
u8 msix_enable;
u16 msix_vector_count;
u8 *cpu_msix_table;
- resource_size_t **reply_post_host_index;
+ resource_size_t __iomem **reply_post_host_index;
u16 cpu_msix_table_sz;
u32 ioc_reset_count;
MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
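
The __iomem annotation on reply_post_host_index is there for sparse: it marks the pointer as MMIO space, so direct dereferences get flagged and only readl()/writel()-style accessors pass cleanly. A tiny illustrative sketch; regs and the 0x6c offset are made up, not mpt2sas registers:

    #include <linux/io.h>
    #include <linux/types.h>

    /* __iomem says this points into device register space; it must be set up
     * via ioremap() elsewhere and accessed only through the MMIO helpers. */
    static void __iomem *regs;

    static void post_host_index(u32 index)
    {
        writel(index, regs + 0x6c);   /* 0x6c is an arbitrary example offset */
    }

    static u32 read_host_index(void)
    {
        return readl(regs + 0x6c);
    }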
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 5055f925d2cd..dd461015813f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -173,7 +173,7 @@ struct fw_event_work {
u8 VP_ID;
u8 ignore;
u16 event;
- void *event_data;
+ char event_data[0] __aligned(4);
};
/* raid transport support */
@@ -1292,7 +1292,8 @@ _scsih_target_alloc(struct scsi_target *starget)
unsigned long flags;
struct sas_rphy *rphy;
- sas_target_priv_data = kzalloc(sizeof(struct scsi_target), GFP_KERNEL);
+ sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
+ GFP_KERNEL);
if (!sas_target_priv_data)
return -ENOMEM;
@@ -1406,7 +1407,8 @@ _scsih_slave_alloc(struct scsi_device *sdev)
struct _sas_device *sas_device;
unsigned long flags;
- sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
+ sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
+ GFP_KERNEL);
if (!sas_device_priv_data)
return -ENOMEM;
@@ -2832,7 +2834,6 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_del(&fw_event->list);
- kfree(fw_event->event_data);
kfree(fw_event);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -2899,11 +2900,10 @@ _scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
return;
list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
- if (cancel_delayed_work(&fw_event->delayed_work)) {
+ if (cancel_delayed_work_sync(&fw_event->delayed_work)) {
_scsih_fw_event_free(ioc, fw_event);
continue;
}
- fw_event->cancel_pending_work = 1;
}
}
@@ -3518,7 +3518,8 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
fw_event->ignore)
continue;
- local_event_data = fw_event->event_data;
+ local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
if (local_event_data->ExpStatus ==
MPI2_EVENT_SAS_TOPO_ES_ADDED ||
local_event_data->ExpStatus ==
@@ -5502,7 +5503,9 @@ _scsih_sas_topology_change_event(struct MPT2SAS_ADAPTER *ioc,
u64 sas_address;
unsigned long flags;
u8 link_rate, prev_link_rate;
- Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
+ Mpi2EventDataSasTopologyChangeList_t *event_data =
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
@@ -5697,7 +5700,8 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
u64 sas_address;
unsigned long flags;
Mpi2EventDataSasDeviceStatusChange_t *event_data =
- fw_event->event_data;
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ fw_event->event_data;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
@@ -5792,6 +5796,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
+ (Mpi2EventDataSasEnclDevStatusChange_t *)
fw_event->event_data);
#endif
}
@@ -5816,7 +5821,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc,
u32 termination_count;
u32 query_count;
Mpi2SCSITaskManagementReply_t *mpi_reply;
- Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
+ Mpi2EventDataSasBroadcastPrimitive_t *event_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ fw_event->event_data;
u16 ioc_status;
unsigned long flags;
int r;
@@ -5967,7 +5974,9 @@ static void
_scsih_sas_discovery_event(struct MPT2SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
- Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data;
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)
+ fw_event->event_data;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
@@ -6355,7 +6364,9 @@ _scsih_sas_ir_config_change_event(struct MPT2SAS_ADAPTER *ioc,
Mpi2EventIrConfigElement_t *element;
int i;
u8 foreign_config;
- Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrConfigChangeList_t *event_data =
+ (Mpi2EventDataIrConfigChangeList_t *)
+ fw_event->event_data;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
@@ -6423,7 +6434,9 @@ _scsih_sas_ir_volume_event(struct MPT2SAS_ADAPTER *ioc,
u16 handle;
u32 state;
int rc;
- Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrVolume_t *event_data =
+ (Mpi2EventDataIrVolume_t *)
+ fw_event->event_data;
if (ioc->shost_recovery)
return;
@@ -6507,7 +6520,9 @@ _scsih_sas_ir_physical_disk_event(struct MPT2SAS_ADAPTER *ioc,
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
u32 ioc_status;
- Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrPhysicalDisk_t *event_data =
+ (Mpi2EventDataIrPhysicalDisk_t *)
+ fw_event->event_data;
u64 sas_address;
if (ioc->shost_recovery)
@@ -6630,7 +6645,9 @@ static void
_scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
- Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrOperationStatus_t *event_data =
+ (Mpi2EventDataIrOperationStatus_t *)
+ fw_event->event_data;
static struct _raid_device *raid_device;
unsigned long flags;
u16 handle;
@@ -7401,7 +7418,7 @@ _firmware_event_work(struct work_struct *work)
struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
/* the queue is being flushed so ignore this event */
- if (ioc->remove_host || fw_event->cancel_pending_work ||
+ if (ioc->remove_host ||
ioc->pci_error_recovery) {
_scsih_fw_event_free(ioc, fw_event);
return;
@@ -7590,23 +7607,15 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
return;
}
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
- if (!fw_event) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return;
- }
sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
- fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
- if (!fw_event->event_data) {
+ fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC);
+ if (!fw_event) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- kfree(fw_event);
return;
}
- memcpy(fw_event->event_data, mpi_reply->EventData,
- sz);
+ memcpy(fw_event->event_data, mpi_reply->EventData, sz);
fw_event->ioc = ioc;
fw_event->VF_ID = mpi_reply->VF_ID;
fw_event->VP_ID = mpi_reply->VP_ID;
@@ -7857,9 +7866,9 @@ _scsih_remove(struct pci_dev *pdev)
}
sas_remove_host(shost);
+ scsi_remove_host(shost);
mpt2sas_base_detach(ioc);
list_del(&ioc->list);
- scsi_remove_host(shost);
scsi_host_put(shost);
}
@@ -8200,13 +8209,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
- if ((scsi_add_host(shost, &pdev->dev))) {
- printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- list_del(&ioc->list);
- goto out_add_shost_fail;
- }
-
/* register EEDP capabilities with SCSI layer */
if (prot_mask)
scsi_host_set_prot(shost, prot_mask);
@@ -8248,16 +8250,23 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
} else
ioc->hide_drives = 0;
+
+ if ((scsi_add_host(shost, &pdev->dev))) {
+ printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_add_shost_fail;
+ }
+
scsi_scan_host(shost);
return 0;
+ out_add_shost_fail:
+ mpt2sas_base_detach(ioc);
out_attach_fail:
destroy_workqueue(ioc->firmware_event_thread);
out_thread_fail:
list_del(&ioc->list);
- scsi_remove_host(shost);
- out_add_shost_fail:
scsi_host_put(shost);
return -ENODEV;
}
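
Replacing the void *event_data pointer with a trailing char array means one kzalloc() covers header plus payload, so the double allocation, the second error path, and the separate kfree() in _scsih_fw_event_free() all disappear. A minimal sketch of the pattern with illustrative names:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct fw_event {
        struct list_head list;
        u16 event;
        char event_data[0] __aligned(4);    /* payload follows the header */
    };

    static struct fw_event *fw_event_alloc(const void *data, size_t sz)
    {
        /* One allocation covers the header and the variable-size payload. */
        struct fw_event *fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC);

        if (!fw_event)
            return NULL;
        memcpy(fw_event->event_data, data, sz);
        return fw_event;    /* a single kfree(fw_event) releases both parts */
    }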
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 0cf4f7000f94..93ce2b2baa41 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -266,7 +266,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
ioc->fault_reset_work_q = NULL;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
if (wq) {
- if (!cancel_delayed_work(&ioc->fault_reset_work))
+ if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
flush_workqueue(wq);
destroy_workqueue(wq);
}
@@ -1624,66 +1624,35 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
- struct adapter_reply_queue *reply_q;
- int cpu_id;
- int cpu_grouping, loop, grouping, grouping_mod;
- int reply_queue;
+ unsigned int cpu, nr_cpus, nr_msix, index = 0;
if (!_base_is_controller_msix_enabled(ioc))
return;
memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
- /* NUMA Hardware bug workaround - drop to less reply queues */
- if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) {
- ioc->reply_queue_count = ioc->facts.MaxMSIxVectors;
- reply_queue = 0;
- list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
- reply_q->msix_index = reply_queue;
- if (++reply_queue == ioc->reply_queue_count)
- reply_queue = 0;
- }
- }
+ nr_cpus = num_online_cpus();
+ nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
+ ioc->facts.MaxMSIxVectors);
+ if (!nr_msix)
+ return;
- /* when there are more cpus than available msix vectors,
- * then group cpus togeather on same irq
- */
- if (ioc->cpu_count > ioc->msix_vector_count) {
- grouping = ioc->cpu_count / ioc->msix_vector_count;
- grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
- if (grouping < 2 || (grouping == 2 && !grouping_mod))
- cpu_grouping = 2;
- else if (grouping < 4 || (grouping == 4 && !grouping_mod))
- cpu_grouping = 4;
- else if (grouping < 8 || (grouping == 8 && !grouping_mod))
- cpu_grouping = 8;
- else
- cpu_grouping = 16;
- } else
- cpu_grouping = 0;
-
- loop = 0;
- reply_q = list_entry(ioc->reply_queue_list.next,
- struct adapter_reply_queue, list);
- for_each_online_cpu(cpu_id) {
- if (!cpu_grouping) {
- ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
- reply_q = list_entry(reply_q->list.next,
- struct adapter_reply_queue, list);
- } else {
- if (loop < cpu_grouping) {
- ioc->cpu_msix_table[cpu_id] =
- reply_q->msix_index;
- loop++;
- } else {
- reply_q = list_entry(reply_q->list.next,
- struct adapter_reply_queue, list);
- ioc->cpu_msix_table[cpu_id] =
- reply_q->msix_index;
- loop = 1;
- }
+ cpu = cpumask_first(cpu_online_mask);
+
+ do {
+ unsigned int i, group = nr_cpus / nr_msix;
+
+ if (index < nr_cpus % nr_msix)
+ group++;
+
+ for (i = 0 ; i < group ; i++) {
+ ioc->cpu_msix_table[cpu] = index;
+ cpu = cpumask_next(cpu, cpu_online_mask);
}
- }
+
+ index++;
+
+ } while (cpu < nr_cpus);
}
/**
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 18e713db1d32..7cf48c5c15a7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -112,8 +112,8 @@ MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
-static int max_lun = MPT3SAS_MAX_LUN;
-module_param(max_lun, int, 0);
+static u64 max_lun = MPT3SAS_MAX_LUN;
+module_param(max_lun, ullong, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
@@ -190,7 +190,7 @@ struct fw_event_work {
u8 VP_ID;
u8 ignore;
u16 event;
- void *event_data;
+ char event_data[0] __aligned(4);
};
/* raid transport support */
@@ -1163,7 +1163,8 @@ _scsih_target_alloc(struct scsi_target *starget)
unsigned long flags;
struct sas_rphy *rphy;
- sas_target_priv_data = kzalloc(sizeof(struct scsi_target), GFP_KERNEL);
+ sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
+ GFP_KERNEL);
if (!sas_target_priv_data)
return -ENOMEM;
@@ -1277,7 +1278,8 @@ _scsih_slave_alloc(struct scsi_device *sdev)
struct _sas_device *sas_device;
unsigned long flags;
- sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
+ sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
+ GFP_KERNEL);
if (!sas_device_priv_data)
return -ENOMEM;
@@ -2490,7 +2492,6 @@ _scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
spin_lock_irqsave(&ioc->fw_event_lock, flags);
list_del(&fw_event->list);
- kfree(fw_event->event_data);
kfree(fw_event);
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
@@ -2511,12 +2512,10 @@ mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->is_driver_loading)
return;
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ fw_event = kzalloc(sizeof(*fw_event) + sizeof(*event_data),
+ GFP_ATOMIC);
if (!fw_event)
return;
- fw_event->event_data = kzalloc(sizeof(*event_data), GFP_ATOMIC);
- if (!fw_event->event_data)
- return;
fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
fw_event->ioc = ioc;
memcpy(fw_event->event_data, event_data, sizeof(*event_data));
@@ -2582,11 +2581,10 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
return;
list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
- if (cancel_delayed_work(&fw_event->delayed_work)) {
+ if (cancel_delayed_work_sync(&fw_event->delayed_work)) {
_scsih_fw_event_free(ioc, fw_event);
continue;
}
- fw_event->cancel_pending_work = 1;
}
}
@@ -3211,7 +3209,8 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
fw_event->ignore)
continue;
- local_event_data = fw_event->event_data;
+ local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
if (local_event_data->ExpStatus ==
MPI2_EVENT_SAS_TOPO_ES_ADDED ||
local_event_data->ExpStatus ==
@@ -5043,7 +5042,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address;
unsigned long flags;
u8 link_rate, prev_link_rate;
- Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
+ Mpi2EventDataSasTopologyChangeList_t *event_data =
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ fw_event->event_data;
#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
@@ -5241,7 +5242,8 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address;
unsigned long flags;
Mpi2EventDataSasDeviceStatusChange_t *event_data =
- fw_event->event_data;
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ fw_event->event_data;
#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
@@ -5337,6 +5339,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
+ (Mpi2EventDataSasEnclDevStatusChange_t *)
fw_event->event_data);
#endif
}
@@ -5361,7 +5364,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
u32 termination_count;
u32 query_count;
Mpi2SCSITaskManagementReply_t *mpi_reply;
- Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
+ Mpi2EventDataSasBroadcastPrimitive_t *event_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ fw_event->event_data;
u16 ioc_status;
unsigned long flags;
int r;
@@ -5513,7 +5518,8 @@ static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
- Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data;
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
@@ -5999,7 +6005,9 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventIrConfigElement_t *element;
int i;
u8 foreign_config;
- Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrConfigChangeList_t *event_data =
+ (Mpi2EventDataIrConfigChangeList_t *)
+ fw_event->event_data;
#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
@@ -6069,7 +6077,8 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
u16 handle;
u32 state;
int rc;
- Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrVolume_t *event_data =
+ (Mpi2EventDataIrVolume_t *) fw_event->event_data;
if (ioc->shost_recovery)
return;
@@ -6152,7 +6161,8 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
Mpi2ConfigReply_t mpi_reply;
Mpi2SasDevicePage0_t sas_device_pg0;
u32 ioc_status;
- Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrPhysicalDisk_t *event_data =
+ (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
u64 sas_address;
if (ioc->shost_recovery)
@@ -6272,7 +6282,9 @@ static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
struct fw_event_work *fw_event)
{
- Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+ Mpi2EventDataIrOperationStatus_t *event_data =
+ (Mpi2EventDataIrOperationStatus_t *)
+ fw_event->event_data;
static struct _raid_device *raid_device;
unsigned long flags;
u16 handle;
@@ -7026,7 +7038,7 @@ static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
/* the queue is being flushed so ignore this event */
- if (ioc->remove_host || fw_event->cancel_pending_work ||
+ if (ioc->remove_host ||
ioc->pci_error_recovery) {
_scsih_fw_event_free(ioc, fw_event);
return;
@@ -7034,7 +7046,9 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
switch (fw_event->event) {
case MPT3SAS_PROCESS_TRIGGER_DIAG:
- mpt3sas_process_trigger_data(ioc, fw_event->event_data);
+ mpt3sas_process_trigger_data(ioc,
+ (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
+ fw_event->event_data);
break;
case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
@@ -7192,18 +7206,11 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
return 1;
}
- fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
- if (!fw_event) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- return 1;
- }
sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
- fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
- if (!fw_event->event_data) {
+ fw_event = kzalloc(sizeof(*fw_event) + sz, GFP_ATOMIC);
+ if (!fw_event) {
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- kfree(fw_event);
return 1;
}
@@ -7431,9 +7438,9 @@ static void _scsih_remove(struct pci_dev *pdev)
}
sas_remove_host(shost);
+ scsi_remove_host(shost);
mpt3sas_base_detach(ioc);
list_del(&ioc->list);
- scsi_remove_host(shost);
scsi_host_put(shost);
}
@@ -7801,13 +7808,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
- if ((scsi_add_host(shost, &pdev->dev))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
- list_del(&ioc->list);
- goto out_add_shost_fail;
- }
-
/* register EEDP capabilities with SCSI layer */
if (prot_mask > 0)
scsi_host_set_prot(shost, prot_mask);
@@ -7835,15 +7835,21 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->name, __FILE__, __LINE__, __func__);
goto out_attach_fail;
}
+ if ((scsi_add_host(shost, &pdev->dev))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ list_del(&ioc->list);
+ goto out_add_shost_fail;
+ }
+
scsi_scan_host(shost);
return 0;
-
+out_add_shost_fail:
+ mpt3sas_base_detach(ioc);
out_attach_fail:
destroy_workqueue(ioc->firmware_event_thread);
out_thread_fail:
list_del(&ioc->list);
- scsi_remove_host(shost);
- out_add_shost_fail:
scsi_host_put(shost);
return -ENODEV;
}
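
The mpt2sas and mpt3sas event teardown also moves from cancel_delayed_work() to cancel_delayed_work_sync(), which is what lets the cancel_pending_work flag go away: the _sync variant waits for a handler that has already started, so the work function no longer needs to re-check whether it was cancelled. A hedged sketch of the semantics; my_cancel_event() is illustrative, and the usual caveat applies that it must not be called while holding a lock the handler itself takes:

    #include <linux/workqueue.h>

    /* 'work' is a delayed work item embedded in an object the handler uses. */
    static bool my_cancel_event(struct delayed_work *work)
    {
        /*
         * cancel_delayed_work() only removes a still-pending item; a handler
         * that has already started keeps running concurrently with the caller.
         * cancel_delayed_work_sync() additionally waits for a running handler
         * to finish, so once it returns the object can be freed safely.
         */
        return cancel_delayed_work_sync(work);
    }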
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 6c1f223a8e1d..ac52f7c99513 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1344,19 +1344,23 @@ void mvs_dev_gone_notify(struct domain_device *dev)
{
unsigned long flags = 0;
struct mvs_device *mvi_dev = dev->lldd_dev;
- struct mvs_info *mvi = mvi_dev->mvi_info;
-
- spin_lock_irqsave(&mvi->lock, flags);
+ struct mvs_info *mvi;
- if (mvi_dev) {
- mv_dprintk("found dev[%d:%x] is gone.\n",
- mvi_dev->device_id, mvi_dev->dev_type);
- mvs_release_task(mvi, dev);
- mvs_free_reg_set(mvi, mvi_dev);
- mvs_free_dev(mvi_dev);
- } else {
+ if (!mvi_dev) {
mv_dprintk("found dev has gone.\n");
+ return;
}
+
+ mvi = mvi_dev->mvi_info;
+
+ spin_lock_irqsave(&mvi->lock, flags);
+
+ mv_dprintk("found dev[%d:%x] is gone.\n",
+ mvi_dev->device_id, mvi_dev->dev_type);
+ mvs_release_task(mvi, dev);
+ mvs_free_reg_set(mvi, mvi_dev);
+ mvs_free_dev(mvi_dev);
+
dev->lldd_dev = NULL;
mvi_dev->sas_device = NULL;
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index edbee8dc62c9..3e716b2f611a 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -142,8 +142,8 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
case RESOURCE_UNCACHED_MEMORY:
size = round_up(size, 8);
- res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
- &res->bus_addr);
+ res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
+ &res->bus_addr);
if (!res->virt_addr) {
dev_err(&mhba->pdev->dev,
"unable to allocate consistent mem,"
@@ -151,7 +151,6 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
kfree(res);
return NULL;
}
- memset(res->virt_addr, 0, size);
break;
default:
@@ -258,12 +257,10 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
if (size == 0)
return 0;
- virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
+ virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
if (!virt_addr)
return -1;
- memset(virt_addr, 0, size);
-
m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
cmd->frame->sg_counts = 1;
cmd->data_buf = virt_addr;
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 7d014b11df62..a7305ffc359d 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -6633,7 +6633,7 @@ static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp)
** patch requested size into sense command
*/
cp->sensecmd[0] = 0x03;
- cp->sensecmd[1] = cmd->device->lun << 5;
+ cp->sensecmd[1] = (cmd->device->lun & 0x7) << 5;
cp->sensecmd[4] = sizeof(cp->sense_buf);
/*
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index 0e008dacf679..02901c54b08b 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -264,11 +264,7 @@
#define SCSI_NCR_SG_TABLESIZE (SCSI_NCR_MAX_SCATTER)
#define SCSI_NCR_TIMER_INTERVAL (HZ)
-#if 1 /* defined CONFIG_SCSI_MULTI_LUN */
#define SCSI_NCR_MAX_LUN (16)
-#else
-#define SCSI_NCR_MAX_LUN (1)
-#endif
/*
* IO functions definition for big/little endian CPU support.
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 0665f9cfdb02..50b086aef178 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -915,7 +915,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
int ret;
nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
- "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
+ "enter. target: 0x%x LUN: 0x%llu cmnd: 0x%x cmndlen: 0x%x "
"use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
index 0d78a4d5576c..80bacb5dc1d4 100644
--- a/drivers/scsi/pas16.c
+++ b/drivers/scsi/pas16.c
@@ -607,8 +607,6 @@ static int pas16_release(struct Scsi_Host *shost)
if (shost->irq)
free_irq(shost->irq, shost);
NCR5380_exit(shost);
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
if (shost->io_port && shost->n_io_port)
release_region(shost->io_port, shost->n_io_port);
scsi_unregister(shost);
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 987fbb1b244e..340ceff03823 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -195,7 +195,7 @@ static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt,
nsp_hw_data *data = (nsp_hw_data *)SCpnt->device->host->hostdata;
nsp_dbg(NSP_DEBUG_QUEUECOMMAND,
- "SCpnt=0x%p target=%d lun=%d sglist=0x%p bufflen=%d sg_count=%d",
+ "SCpnt=0x%p target=%d lun=%llu sglist=0x%p bufflen=%d sg_count=%d",
SCpnt, target, SCpnt->device->lun, scsi_sglist(SCpnt),
scsi_bufflen(SCpnt), scsi_sg_count(SCpnt));
//nsp_dbg(NSP_DEBUG_QUEUECOMMAND, "before CurrentSC=0x%p", data->CurrentSC);
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index f5b52731abd9..155f9573021f 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -558,7 +558,7 @@ SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id,
- SCpnt->device->lun, scsi_bufflen(SCpnt)));
+ (u8)SCpnt->device->lun, scsi_bufflen(SCpnt)));
VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index a368d77b8d41..d3a08aea0948 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -397,7 +397,10 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
payload.func_specific = kzalloc(4096, GFP_KERNEL);
if (!payload.func_specific)
return -ENOMEM;
- PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ if (PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload)) {
+ kfree(payload.func_specific);
+ return -ENOMEM;
+ }
wait_for_completion(&completion);
virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
@@ -614,11 +617,11 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->nvmd_completion = &completion;
ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
+ if (ret)
+ break;
wait_for_completion(&completion);
- if (ret || (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS)) {
+ if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
ret = fwControl->retcode;
- kfree(ioctlbuffer);
- ioctlbuffer = NULL;
break;
}
}
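
Both pm8001_ctl hunks apply the same rule seen throughout the series: if issuing the firmware request fails, return (and release what was allocated) rather than calling wait_for_completion() on a completion that will never be signalled. A hedged sketch of the shape, with my_fire_request() standing in for get_nvmd_req()/fw_flash_update_req():

    #include <linux/completion.h>

    /* Hypothetical request that completes asynchronously via 'done'. */
    static int my_issue_and_wait(int (*my_fire_request)(void *arg),
                                 void *arg, struct completion *done)
    {
        int rc = my_fire_request(arg);

        if (rc)
            return rc;      /* never queued: waiting here would hang forever */

        wait_for_completion(done);
        return 0;
    }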
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index a97be015e52e..173831016f5f 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1346,7 +1346,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
&pMessage) < 0) {
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("No free mpi buffer\n"));
- return -1;
+ return -ENOMEM;
}
BUG_ON(!payload);
/*Copy to the payload*/
@@ -1751,6 +1751,8 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.tag = cpu_to_le32(ccb_tag);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
+ if (ret)
+ pm8001_tag_free(pm8001_ha, ccb_tag);
}
@@ -1778,6 +1780,7 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res) {
+ sas_free_task(task);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("cannot allocate tag !!!\n"));
return;
@@ -1788,14 +1791,14 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
*/
dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
if (!dev) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Domain device cannot be allocated\n"));
- sas_free_task(task);
return;
- } else {
- task->dev = dev;
- task->dev->lldd_dev = pm8001_ha_dev;
}
+ task->dev = dev;
+ task->dev->lldd_dev = pm8001_ha_dev;
ccb = &pm8001_ha->ccb_info[ccb_tag];
ccb->device = pm8001_ha_dev;
@@ -1821,7 +1824,11 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha,
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
-
+ if (res) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
+ kfree(dev);
+ }
}
/**
@@ -3100,7 +3107,7 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
complete(pm8001_dev->setds_completion);
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_ccb_free(pm8001_ha, tag);
+ pm8001_tag_free(pm8001_ha, tag);
}
void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3119,13 +3126,12 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_ccb_free(pm8001_ha, tag);
+ pm8001_tag_free(pm8001_ha, tag);
}
void
pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
- struct fw_control_ex *fw_control_context;
struct get_nvm_data_resp *pPayload =
(struct get_nvm_data_resp *)(piomb + 4);
u32 tag = le32_to_cpu(pPayload->tag);
@@ -3134,7 +3140,6 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 ir_tds_bn_dps_das_nvm =
le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
- fw_control_context = ccb->fw_control_context;
PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
if ((dlen_status & NVMD_STAT) != 0) {
@@ -3175,13 +3180,11 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
(dlen_status & NVMD_LEN) >> 24));
}
- memcpy(fw_control_context->usrAddr,
- pm8001_ha->memoryMap.region[NVMD].virt_ptr,
- fw_control_context->len);
- complete(pm8001_ha->nvmd_completion);
+ kfree(ccb->fw_control_context);
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_ccb_free(pm8001_ha, tag);
+ pm8001_tag_free(pm8001_ha, tag);
+ complete(pm8001_ha->nvmd_completion);
}
int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3588,7 +3591,7 @@ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
complete(pm8001_dev->dcompletion);
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_ccb_free(pm8001_ha, htag);
+ pm8001_tag_free(pm8001_ha, htag);
return 0;
}
@@ -3617,15 +3620,11 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb)
{
u32 status;
- struct fw_control_ex fw_control_context;
struct fw_flash_Update_resp *ppayload =
(struct fw_flash_Update_resp *)(piomb + 4);
u32 tag = le32_to_cpu(ppayload->tag);
struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
status = le32_to_cpu(ppayload->status);
- memcpy(&fw_control_context,
- ccb->fw_control_context,
- sizeof(fw_control_context));
switch (status) {
case FLASH_UPDATE_COMPLETE_PENDING_REBOOT:
PM8001_MSG_DBG(pm8001_ha,
@@ -3668,11 +3667,11 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
pm8001_printk("No matched status = %d\n", status));
break;
}
- ccb->fw_control_context->fw_control->retcode = status;
- complete(pm8001_ha->nvmd_completion);
+ kfree(ccb->fw_control_context);
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
- pm8001_ccb_free(pm8001_ha, tag);
+ pm8001_tag_free(pm8001_ha, tag);
+ complete(pm8001_ha->nvmd_completion);
return 0;
}
@@ -4257,7 +4256,11 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
smp_cmd.long_smp_req.long_resp_size =
cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+ (u32 *)&smp_cmd, 0);
+ if (rc)
+ goto err_out_2;
+
return 0;
err_out_2:
@@ -4398,7 +4401,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
/* Check for read log for failed drive and return */
if (sata_cmd.sata_fis.command == 0x2f) {
- if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
+ if (((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) ||
(pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) ||
(pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) {
struct task_status_struct *ts;
@@ -4789,6 +4792,10 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,
break;
}
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ if (rc) {
+ kfree(fw_control_context);
+ pm8001_tag_free(pm8001_ha, tag);
+ }
return rc;
}
@@ -4869,6 +4876,10 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
break;
}
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0);
+ if (rc) {
+ kfree(fw_control_context);
+ pm8001_tag_free(pm8001_ha, tag);
+ }
return rc;
}
@@ -5061,7 +5072,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
memset(&payload, 0, sizeof(payload));
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc)
- return -1;
+ return -ENOMEM;
ccb = &pm8001_ha->ccb_info[tag];
ccb->ccb_tag = tag;
circularQ = &pm8001_ha->inbnd_q_tbl[0];
@@ -5070,6 +5081,8 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha)
payload.sata_hol_tmo = cpu_to_le32(80);
payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
return rc;
}
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index e90c89f1d480..e49623a897a7 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -246,6 +246,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,
{
int i;
spin_lock_init(&pm8001_ha->lock);
+ spin_lock_init(&pm8001_ha->bitmap_lock);
PM8001_INIT_DBG(pm8001_ha,
pm8001_printk("pm8001_alloc: PHY:%x\n",
pm8001_ha->chip->n_phy));
@@ -621,6 +622,8 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_ioctl_payload payload;
u16 deviceid;
+ int rc;
+
pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
pm8001_ha->nvmd_completion = &completion;
@@ -638,7 +641,16 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
}
payload.offset = 0;
payload.func_specific = kzalloc(payload.length, GFP_KERNEL);
- PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ if (!payload.func_specific) {
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("mem alloc fail\n"));
+ return;
+ }
+ rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ if (rc) {
+ kfree(payload.func_specific);
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
+ return;
+ }
wait_for_completion(&completion);
for (i = 0, j = 0; i <= 7; i++, j++) {
@@ -661,6 +673,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
pm8001_printk("phy %d sas_addr = %016llx\n", i,
pm8001_ha->phy[i].dev_sas_addr));
}
+ kfree(payload.func_specific);
#else
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
pm8001_ha->phy[i].dev_sas_addr = 0x50010c600047f9d0ULL;
@@ -684,6 +697,7 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
/*OPTION ROM FLASH read for the SPC cards */
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_ioctl_payload payload;
+ int rc;
pm8001_ha->nvmd_completion = &completion;
/* SAS ADDRESS read from flash / EEPROM */
@@ -694,7 +708,12 @@ static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
if (!payload.func_specific)
return -ENOMEM;
/* Read phy setting values from flash */
- PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
+ if (rc) {
+ kfree(payload.func_specific);
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk("nvmd failed\n"));
+ return -ENOMEM;
+ }
wait_for_completion(&completion);
pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
kfree(payload.func_specific);
@@ -744,9 +763,10 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->irq_vector[i].irq_id = i;
pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
- if (request_irq(pm8001_ha->msix_entries[i].vector,
+ rc = request_irq(pm8001_ha->msix_entries[i].vector,
pm8001_interrupt_handler_msix, flag,
- intr_drvname[i], &(pm8001_ha->irq_vector[i]))) {
+ intr_drvname[i], &(pm8001_ha->irq_vector[i]));
+ if (rc) {
for (j = 0; j < i; j++)
free_irq(
pm8001_ha->msix_entries[j].vector,
@@ -964,6 +984,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
int i, j;
u32 device_state;
pm8001_ha = sha->lldd_ha;
+ sas_suspend_ha(sha);
flush_workqueue(pm8001_wq);
scsi_block_requests(pm8001_ha->shost);
if (!pdev->pm_cap) {
@@ -1013,6 +1034,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
int rc;
u8 i = 0, j;
u32 device_state;
+ DECLARE_COMPLETION_ONSTACK(completion);
pm8001_ha = sha->lldd_ha;
device_state = pdev->current_state;
@@ -1033,7 +1055,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
rc = pci_go_44(pdev);
if (rc)
goto err_out_disable;
-
+ sas_prep_resume_ha(sha);
/* chip soft rst only for spc */
if (pm8001_ha->chip_id == chip_8001) {
PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha);
@@ -1065,7 +1087,13 @@ static int pm8001_pci_resume(struct pci_dev *pdev)
for (i = 1; i < pm8001_ha->number_of_intr; i++)
PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
}
- scsi_unblock_requests(pm8001_ha->shost);
+ pm8001_ha->flags = PM8001F_RUN_TIME;
+ for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+ pm8001_ha->phy[i].enable_completion = &completion;
+ PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+ wait_for_completion(&completion);
+ }
+ sas_resume_ha(sha);
return 0;
err_out_disable:
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 8a44bc92bc78..76570e6a547d 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -58,25 +58,14 @@ static int pm8001_find_tag(struct sas_task *task, u32 *tag)
}
/**
- * pm8001_tag_clear - clear the tags bitmap
+ * pm8001_tag_free - free the no more needed tag
* @pm8001_ha: our hba struct
* @tag: the found tag associated with the task
*/
-static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag)
-{
- void *bitmap = pm8001_ha->tags;
- clear_bit(tag, bitmap);
-}
-
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
- pm8001_tag_clear(pm8001_ha, tag);
-}
-
-static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag)
-{
void *bitmap = pm8001_ha->tags;
- set_bit(tag, bitmap);
+ clear_bit(tag, bitmap);
}
/**
@@ -86,14 +75,18 @@ static void pm8001_tag_set(struct pm8001_hba_info *pm8001_ha, u32 tag)
*/
inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
- unsigned int index, tag;
+ unsigned int tag;
void *bitmap = pm8001_ha->tags;
+ unsigned long flags;
- index = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
- tag = index;
- if (tag >= pm8001_ha->tags_num)
+ spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
+ tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
+ if (tag >= pm8001_ha->tags_num) {
+ spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
return -SAS_QUEUE_FULL;
- pm8001_tag_set(pm8001_ha, tag);
+ }
+ set_bit(tag, bitmap);
+ spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
*tag_out = tag;
return 0;
}
@@ -102,7 +95,7 @@ void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
{
int i;
for (i = 0; i < pm8001_ha->tags_num; ++i)
- pm8001_tag_clear(pm8001_ha, i);
+ pm8001_tag_free(pm8001_ha, i);
}
/**
@@ -123,13 +116,12 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
u64 align_offset = 0;
if (align)
align_offset = (dma_addr_t)align - 1;
- mem_virt_alloc =
- pci_alloc_consistent(pdev, mem_size + align, &mem_dma_handle);
+ mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
+ &mem_dma_handle);
if (!mem_virt_alloc) {
pm8001_printk("memory allocation error\n");
return -1;
}
- memset((void *)mem_virt_alloc, 0, mem_size+align);
*pphys_addr = mem_dma_handle;
phys_align = (*pphys_addr + align_offset) & ~align_offset;
*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
@@ -501,11 +493,6 @@ int pm8001_queue_command(struct sas_task *task, const int num,
return pm8001_task_exec(task, num, gfp_flags, 0, NULL);
}
-void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx)
-{
- pm8001_tag_clear(pm8001_ha, ccb_idx);
-}
-
/**
* pm8001_ccb_task_free - free the sg for ssp and smp command, free the ccb.
* @pm8001_ha: our hba card information
@@ -542,7 +529,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
ccb->open_retry = 0;
- pm8001_ccb_free(pm8001_ha, ccb_idx);
+ pm8001_tag_free(pm8001_ha, ccb_idx);
}
/**
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 1ee06f21803b..f6b2ac59dae4 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -475,6 +475,7 @@ struct pm8001_hba_info {
struct list_head list;
unsigned long flags;
spinlock_t lock;/* host-wide lock */
+ spinlock_t bitmap_lock;
struct pci_dev *pdev;/* our device */
struct device *dev;
struct pm8001_hba_memspace io_mem[6];
@@ -616,7 +617,6 @@ extern struct workqueue_struct *pm8001_wq;
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag);
-void pm8001_ccb_free(struct pm8001_hba_info *pm8001_ha, u32 ccb_idx);
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
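
The pm8001 tag changes fold pm8001_tag_set/_clear/pm8001_ccb_free into a single pm8001_tag_free() and, with the new bitmap_lock, make find_first_zero_bit() and set_bit() atomic with respect to concurrent allocators, closing the window where two contexts could pick the same tag. A self-contained sketch of that allocator shape; all names below are illustrative, not the driver's:

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define MY_TAGS_NUM 1024

    static DECLARE_BITMAP(my_tags, MY_TAGS_NUM);
    static DEFINE_SPINLOCK(my_bitmap_lock);

    static int my_tag_alloc(u32 *tag_out)
    {
        unsigned long flags, tag;

        /* The lock spans the search and the set, so the test-and-set is
         * atomic with respect to other allocators. */
        spin_lock_irqsave(&my_bitmap_lock, flags);
        tag = find_first_zero_bit(my_tags, MY_TAGS_NUM);
        if (tag >= MY_TAGS_NUM) {
            spin_unlock_irqrestore(&my_bitmap_lock, flags);
            return -EBUSY;
        }
        set_bit(tag, my_tags);
        spin_unlock_irqrestore(&my_bitmap_lock, flags);

        *tag_out = tag;
        return 0;
    }

    static void my_tag_free(u32 tag)
    {
        /* clear_bit() is atomic on its own, so freeing needs no lock. */
        clear_bit(tag, my_tags);
    }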
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index d70587f96184..b06443a0db2d 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -856,6 +856,8 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
return rc;
}
@@ -936,6 +938,8 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha)
sizeof(SASProtocolTimerConfig_t));
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
return rc;
}
@@ -948,7 +952,7 @@ static int
pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
{
u32 scratch3_value;
- int ret;
+ int ret = -1;
/* Read encryption status from SCRATCH PAD 3 */
scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
@@ -982,7 +986,7 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->encrypt_info.status = 0xFFFFFFFF;
pm8001_ha->encrypt_info.cipher_mode = 0;
pm8001_ha->encrypt_info.sec_mode = 0;
- return 0;
+ ret = 0;
} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
SCRATCH_PAD3_ENC_DIS_ERR) {
pm8001_ha->encrypt_info.status =
@@ -1004,7 +1008,6 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
pm8001_ha->encrypt_info.sec_mode,
pm8001_ha->encrypt_info.status));
- ret = -1;
} else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) ==
SCRATCH_PAD3_ENC_ENA_ERR) {
@@ -1028,7 +1031,6 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha)
scratch3_value, pm8001_ha->encrypt_info.cipher_mode,
pm8001_ha->encrypt_info.sec_mode,
pm8001_ha->encrypt_info.status));
- ret = -1;
}
return ret;
}
@@ -1059,6 +1061,8 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha)
KEK_MGMT_SUBOP_KEYCARDUPDATE);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
return rc;
}
@@ -1383,8 +1387,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task->task_done = pm8001_task_done;
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
- if (res)
+ if (res) {
+ sas_free_task(task);
return;
+ }
ccb = &pm8001_ha->ccb_info[ccb_tag];
ccb->device = pm8001_ha_dev;
@@ -1399,7 +1405,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha,
task_abort.tag = cpu_to_le32(ccb_tag);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0);
-
+ if (ret) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
+ }
}
static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
@@ -1426,6 +1435,7 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res) {
+ sas_free_task(task);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("cannot allocate tag !!!\n"));
return;
@@ -1436,15 +1446,16 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
*/
dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC);
if (!dev) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Domain device cannot be allocated\n"));
- sas_free_task(task);
return;
- } else {
- task->dev = dev;
- task->dev->lldd_dev = pm8001_ha_dev;
}
+ task->dev = dev;
+ task->dev->lldd_dev = pm8001_ha_dev;
+
ccb = &pm8001_ha->ccb_info[ccb_tag];
ccb->device = pm8001_ha_dev;
ccb->ccb_tag = ccb_tag;
@@ -1469,7 +1480,11 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha,
memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis));
res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0);
-
+ if (res) {
+ sas_free_task(task);
+ pm8001_tag_free(pm8001_ha, ccb_tag);
+ kfree(dev);
+ }
}
/**
@@ -3815,7 +3830,10 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag,
&smp_cmd, pm8001_ha->smp_exp_mode, length);
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
+ (u32 *)&smp_cmd, 0);
+ if (rc)
+ goto err_out_2;
return 0;
err_out_2:
@@ -4406,6 +4424,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
SAS_ADDR_SIZE);
rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
return rc;
}
@@ -4484,7 +4504,9 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i));
j++;
}
- pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
+ if (rc)
+ pm8001_tag_free(pm8001_ha, tag);
}
void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index be8ce54f99b2..6f3275d020a0 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -237,7 +237,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
scsi_dev->host->unique_id,
scsi_dev->channel,
scsi_dev->id,
- scsi_dev->lun);
+ (u8)scsi_dev->lun);
if (RES_IS_GSCSI(res->cfg_entry)) {
scsi_dev->allow_restart = 1;
@@ -4213,9 +4213,9 @@ static ssize_t pmcraid_store_log_level(
{
struct Scsi_Host *shost;
struct pmcraid_instance *pinstance;
- unsigned long val;
+ u8 val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtou8(buf, 10, &val))
return -EINVAL;
/* log-level should be from 0 to 2 */
if (val > 2)
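
strict_strtoul() was deprecated in favour of the kstrto*() helpers, and since the log level only ranges over 0-2 the attribute store now parses directly into a u8 with kstrtou8(), which already rejects anything that does not fit the type. A minimal sketch; parse_log_level() is a stand-in for the sysfs store handler:

    #include <linux/kernel.h>    /* kstrtou8() */

    /* Parse a sysfs-style buffer into a small log level (0..2). */
    static int parse_log_level(const char *buf, u8 *level)
    {
        u8 val;

        if (kstrtou8(buf, 10, &val))    /* non-numeric or > 255 */
            return -EINVAL;
        if (val > 2)                    /* driver-specific range check */
            return -EINVAL;

        *level = val;
        return 0;
    }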
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index e6e2a30493e6..ef23fabe3924 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -78,7 +78,7 @@ static int ps3rom_slave_configure(struct scsi_device *scsi_dev)
struct ps3rom_private *priv = shost_priv(scsi_dev->host);
struct ps3_storage_device *dev = priv->dev;
- dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %u, channel %u\n", __func__,
+ dev_dbg(&dev->sbd.core, "%s:%u: id %u, lun %llu, channel %u\n", __func__,
__LINE__, scsi_dev->id, scsi_dev->lun, scsi_dev->channel);
/*
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index de5d0ae19d83..b64399153135 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -320,8 +320,8 @@ struct srb_iocb {
* defined in tsk_mgmt_entry struct
* for control_flags field in qla_fw.h.
*/
+ uint64_t lun;
uint32_t flags;
- uint32_t lun;
uint32_t data;
struct completion comp;
__le16 comp_status;
@@ -2529,8 +2529,8 @@ struct isp_operations {
void (*disable_intrs) (struct qla_hw_data *);
int (*abort_command) (srb_t *);
- int (*target_reset) (struct fc_port *, unsigned int, int);
- int (*lun_reset) (struct fc_port *, unsigned int, int);
+ int (*target_reset) (struct fc_port *, uint64_t, int);
+ int (*lun_reset) (struct fc_port *, uint64_t, int);
int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
uint8_t, uint8_t, uint16_t *, uint8_t);
int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d48dea8fab1b..d646540db3ac 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -113,7 +113,7 @@ extern int ql2xenabledif;
extern int ql2xenablehba_err_chk;
extern int ql2xtargetreset;
extern int ql2xdontresethba;
-extern unsigned int ql2xmaxlun;
+extern uint64_t ql2xmaxlun;
extern int ql2xmdcapmask;
extern int ql2xmdenable;
@@ -212,7 +212,7 @@ extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t);
extern int qla2x00_start_scsi(srb_t *sp);
extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
- uint16_t, uint16_t, uint8_t);
+ uint16_t, uint64_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
extern int qla24xx_dif_start_scsi(srb_t *);
extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
@@ -262,10 +262,10 @@ extern int
qla2x00_abort_command(srb_t *);
extern int
-qla2x00_abort_target(struct fc_port *, unsigned int, int);
+qla2x00_abort_target(struct fc_port *, uint64_t, int);
extern int
-qla2x00_lun_reset(struct fc_port *, unsigned int, int);
+qla2x00_lun_reset(struct fc_port *, uint64_t, int);
extern int
qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -339,12 +339,12 @@ qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
extern int qla24xx_abort_command(srb_t *);
extern int qla24xx_async_abort_command(srb_t *);
extern int
-qla24xx_abort_target(struct fc_port *, unsigned int, int);
+qla24xx_abort_target(struct fc_port *, uint64_t, int);
extern int
-qla24xx_lun_reset(struct fc_port *, unsigned int, int);
+qla24xx_lun_reset(struct fc_port *, uint64_t, int);
extern int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *, unsigned int,
- unsigned int, enum nexus_wait_type);
+ uint64_t, enum nexus_wait_type);
extern int
qla2x00_system_error(scsi_qla_host_t *);
@@ -617,8 +617,8 @@ extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
extern irqreturn_t qlafx00_intr_handler(int, void *);
extern void qlafx00_enable_intrs(struct qla_hw_data *);
extern void qlafx00_disable_intrs(struct qla_hw_data *);
-extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);
-extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);
+extern int qlafx00_abort_target(fc_port_t *, uint64_t, int);
+extern int qlafx00_lun_reset(fc_port_t *, uint64_t, int);
extern int qlafx00_start_scsi(srb_t *);
extern int qlafx00_abort_isp(scsi_qla_host_t *);
extern int qlafx00_iospace_config(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index e2184412617d..46990f4ceb40 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1526,8 +1526,8 @@ try_fce:
FCE_SIZE, ha->fce, ha->fce_dma);
/* Allocate memory for Fibre Channel Event Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
- GFP_KERNEL);
+ tc = dma_zalloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
+ GFP_KERNEL);
if (!tc) {
ql_log(ql_log_warn, vha, 0x00be,
"Unable to allocate (%d KB) for FCE.\n",
@@ -1535,7 +1535,6 @@ try_fce:
goto try_eft;
}
- memset(tc, 0, FCE_SIZE);
rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
ha->fce_mb, &ha->fce_bufs);
if (rval) {
@@ -1560,8 +1559,8 @@ try_eft:
EFT_SIZE, ha->eft, ha->eft_dma);
/* Allocate memory for Extended Trace Buffer. */
- tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
- GFP_KERNEL);
+ tc = dma_zalloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
+ GFP_KERNEL);
if (!tc) {
ql_log(ql_log_warn, vha, 0x00c1,
"Unable to allocate (%d KB) for EFT.\n",
@@ -1569,7 +1568,6 @@ try_eft:
goto cont_alloc;
}
- memset(tc, 0, EFT_SIZE);
rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
if (rval) {
ql_log(ql_log_warn, vha, 0x00c2,
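The dma_zalloc_coherent() conversions above let the allocator zero the FCE/EFT buffers instead of relying on the open-coded memset(). At the time of this series the helper is essentially a thin wrapper that ORs in __GFP_ZERO; a from-memory sketch (not copied from include/linux/dma-mapping.h):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch of the zeroing wrapper; callers can drop their follow-up memset(). */
static inline void *dma_zalloc_coherent_sketch(struct device *dev, size_t size,
					       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}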
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 760931529592..150529d98db4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -520,7 +520,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
struct rsp_que *rsp, uint16_t loop_id,
- uint16_t lun, uint8_t type)
+ uint64_t lun, uint8_t type)
{
mrk_entry_t *mrk;
struct mrk_entry_24xx *mrk24 = NULL;
@@ -543,14 +543,13 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
if (IS_FWI2_CAPABLE(ha)) {
mrk24 = (struct mrk_entry_24xx *) mrk;
mrk24->nport_handle = cpu_to_le16(loop_id);
- mrk24->lun[1] = LSB(lun);
- mrk24->lun[2] = MSB(lun);
+ int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
mrk24->vp_index = vha->vp_idx;
mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
} else {
SET_TARGET_ID(ha, mrk->target, loop_id);
- mrk->lun = cpu_to_le16(lun);
+ mrk->lun = cpu_to_le16((uint16_t)lun);
}
}
wmb();
@@ -562,7 +561,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
- struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
+ struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
uint8_t type)
{
int ret;
@@ -2047,7 +2046,7 @@ static void
qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
{
uint32_t flags;
- unsigned int lun;
+ uint64_t lun;
struct fc_port *fcport = sp->fcport;
scsi_qla_host_t *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
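The marker IOCB above now builds the 8-byte FCP LUN with the midlayer helper int_to_scsilun() instead of poking LSB/MSB by hand, which is what makes LUNs above 0xffff representable. A from-memory sketch of the encoding that helper performs (two big-endian bytes per SAM addressing level, lowest level first); the in-tree helper is authoritative:

#include <linux/string.h>
#include <scsi/scsi.h>		/* struct scsi_lun */

static void int_to_scsilun_sketch(u64 lun, struct scsi_lun *scsilun)
{
	int i;

	memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));
	for (i = 0; i < sizeof(lun); i += 2) {
		scsilun->scsi_lun[i]     = (lun >> 8) & 0xFF;
		scsilun->scsi_lun[i + 1] = lun & 0xFF;
		lun >>= 16;
	}
}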
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index a56825c73c31..550a4a31f51a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1659,7 +1659,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sense_len) {
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
- "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+ "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
cp);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
@@ -2281,7 +2281,7 @@ check_scsi_status:
out:
if (logit)
ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
- "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
"portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
comp_status, scsi_status, res, vha->host_no,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 1c33a77db5c2..d9aafc003be2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -947,7 +947,7 @@ qla2x00_abort_command(srb_t *sp)
}
int
-qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
+qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
int rval, rval2;
mbx_cmd_t mc;
@@ -1000,7 +1000,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
}
int
-qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
+qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
int rval, rval2;
mbx_cmd_t mc;
@@ -1022,7 +1022,7 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
mcp->mb[1] = fcport->loop_id;
else
mcp->mb[1] = fcport->loop_id << 8;
- mcp->mb[2] = l;
+ mcp->mb[2] = (u32)l;
mcp->mb[3] = 0;
mcp->mb[9] = vha->vp_idx;
@@ -2666,7 +2666,7 @@ struct tsk_mgmt_cmd {
static int
__qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
- unsigned int l, int tag)
+ uint64_t l, int tag)
{
int rval, rval2;
struct tsk_mgmt_cmd *tsk;
@@ -2760,7 +2760,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
}
int
-qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
+qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
{
struct qla_hw_data *ha = fcport->vha->hw;
@@ -2771,7 +2771,7 @@ qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
}
int
-qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
+qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
{
struct qla_hw_data *ha = fcport->vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index abeb3901498b..4775baa8b6a0 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -726,13 +726,13 @@ qlafx00_disable_intrs(struct qla_hw_data *ha)
}
int
-qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
+qlafx00_abort_target(fc_port_t *fcport, uint64_t l, int tag)
{
return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
}
int
-qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
+qlafx00_lun_reset(fc_port_t *fcport, uint64_t l, int tag)
{
return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
}
@@ -2159,7 +2159,7 @@ qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sense_len) {
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
- "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+ "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
cp);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
@@ -2524,7 +2524,7 @@ check_scsi_status:
if (logit)
ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
- "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
"tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
"par_sense_len=0x%x, rsp_info_len=0x%x\n",
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d96bfb55e57b..be9698d920c2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -202,8 +202,8 @@ MODULE_PARM_DESC(ql2xdontresethba,
" 0 (Default) -- Reset on failure.\n"
" 1 -- Do not reset on failure.\n");
-uint ql2xmaxlun = MAX_LUNS;
-module_param(ql2xmaxlun, uint, S_IRUGO);
+uint64_t ql2xmaxlun = MAX_LUNS;
+module_param(ql2xmaxlun, ullong, S_IRUGO);
MODULE_PARM_DESC(ql2xmaxlun,
"Defines the maximum LU number to register with the SCSI "
"midlayer. Default is 65535.");
@@ -920,7 +920,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
srb_t *sp;
int ret;
- unsigned int id, lun;
+ unsigned int id;
+ uint64_t lun;
unsigned long flags;
int rval, wait = 0;
struct qla_hw_data *ha = vha->hw;
@@ -944,7 +945,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
ql_dbg(ql_dbg_taskm, vha, 0x8002,
- "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
+ "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p\n",
vha->host_no, id, lun, sp, cmd);
/* Get a reference to the sp and drop the lock.*/
@@ -995,7 +996,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
ql_log(ql_log_info, vha, 0x801c,
- "Abort command issued nexus=%ld:%d:%d -- %d %x.\n",
+ "Abort command issued nexus=%ld:%d:%llu -- %d %x.\n",
vha->host_no, id, lun, wait, ret);
return ret;
@@ -1003,7 +1004,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
int
qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
- unsigned int l, enum nexus_wait_type type)
+ uint64_t l, enum nexus_wait_type type)
{
int cnt, match, status;
unsigned long flags;
@@ -1060,7 +1061,7 @@ static char *reset_errors[] = {
static int
__qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
- struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
+ struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, uint64_t, int))
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -1075,7 +1076,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
return err;
ql_log(ql_log_info, vha, 0x8009,
- "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
+ "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no,
cmd->device->id, cmd->device->lun, cmd);
err = 0;
@@ -1100,14 +1101,14 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
}
ql_log(ql_log_info, vha, 0x800e,
- "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name,
+ "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name,
vha->host_no, cmd->device->id, cmd->device->lun, cmd);
return SUCCESS;
eh_reset_failed:
ql_log(ql_log_info, vha, 0x800f,
- "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
+ "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
cmd);
return FAILED;
@@ -1154,7 +1155,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int ret = FAILED;
- unsigned int id, lun;
+ unsigned int id;
+ uint64_t lun;
id = cmd->device->id;
lun = cmd->device->lun;
@@ -1169,7 +1171,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
ret = FAILED;
ql_log(ql_log_info, vha, 0x8012,
- "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
+ "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_fatal, vha, 0x8013,
@@ -1193,7 +1195,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
eh_bus_reset_done:
ql_log(ql_log_warn, vha, 0x802b,
- "BUS RESET %s nexus=%ld:%d:%d.\n",
+ "BUS RESET %s nexus=%ld:%d:%llu.\n",
(ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
return ret;
@@ -1220,14 +1222,15 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
struct qla_hw_data *ha = vha->hw;
int ret = FAILED;
- unsigned int id, lun;
+ unsigned int id;
+ uint64_t lun;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
id = cmd->device->id;
lun = cmd->device->lun;
ql_log(ql_log_info, vha, 0x8018,
- "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
+ "ADAPTER RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun);
/*
* No point in issuing another reset if one is active. Also do not
@@ -1273,7 +1276,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
eh_host_reset_lock:
ql_log(ql_log_info, vha, 0x8017,
- "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
+ "ADAPTER RESET %s nexus=%ld:%d:%llu.\n",
(ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
return ret;
@@ -1409,7 +1412,7 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
return;
ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
- "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
+ "Queue depth adjusted-down to %d for nexus=%ld:%d:%llu.\n",
sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}
@@ -1432,7 +1435,7 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
ql_dbg(ql_dbg_io, vha, 0x302a,
- "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
+ "Queue depth adjusted-up to %d for nexus=%ld:%d:%llu.\n",
sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}
@@ -2661,14 +2664,19 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
host->max_cmd_len = MAX_CMDSZ;
host->max_channel = MAX_BUSES - 1;
- host->max_lun = ql2xmaxlun;
+ /* Older HBAs support only 16-bit LUNs */
+ if (!IS_QLAFX00(ha) && !IS_FWI2_CAPABLE(ha) &&
+ ql2xmaxlun > 0xffff)
+ host->max_lun = 0xffff;
+ else
+ host->max_lun = ql2xmaxlun;
host->transportt = qla2xxx_transport_template;
sht->vendor_id = (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC);
ql_dbg(ql_dbg_init, base_vha, 0x0033,
"max_id=%d this_id=%d "
"cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
- "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id,
+ "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id,
host->this_id, host->cmd_per_lun, host->unique_id,
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 5f58b451327e..2559144f5475 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -23,7 +23,7 @@ void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen);
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha);
int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
- int lun);
+ uint64_t lun);
int qla4xxx_reset_target(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
int qla4xxx_get_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
@@ -76,7 +76,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t state, uint32_t conn_error);
void qla4xxx_dump_buffer(void *b, uint32_t size);
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
+ struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod);
int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
uint32_t offset, uint32_t length, uint32_t options);
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index e5697ab144d2..08ab6dac226d 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -83,7 +83,7 @@ static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
* This routine issues a marker IOCB.
**/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod)
+ struct ddb_entry *ddb_entry, uint64_t lun, uint16_t mrkr_mod)
{
struct qla4_marker_entry *marker_entry;
unsigned long flags = 0;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 081b6b78d2c6..4f9c0f2be89d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -26,7 +26,7 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
if (sense_len == 0) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
+ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%llu: %s:"
" sense len 0\n", ha->host_no,
cmd->device->channel, cmd->device->id,
cmd->device->lun, __func__));
@@ -43,7 +43,7 @@ static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);
- DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
+ DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: %s: sense key = %x, "
"ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
cmd->device->channel, cmd->device->id,
cmd->device->lun, __func__,
@@ -169,7 +169,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
cmd->result = DID_ERROR << 16;
- DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
+ DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: "
"Mid-layer Data underrun0, "
"xferlen = 0x%x, "
"residual = 0x%x\n", ha->host_no,
@@ -197,7 +197,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
case SCS_RESET_OCCURRED:
- DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
+ DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n",
ha->host_no, cmd->device->channel,
cmd->device->id, cmd->device->lun, __func__));
@@ -205,7 +205,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
case SCS_ABORTED:
- DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
+ DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n",
ha->host_no, cmd->device->channel,
cmd->device->id, cmd->device->lun, __func__));
@@ -213,7 +213,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
break;
case SCS_TIMEOUT:
- DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
+ DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: Timeout\n",
ha->host_no, cmd->device->channel,
cmd->device->id, cmd->device->lun));
@@ -232,7 +232,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
case SCS_DATA_OVERRUN:
if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
(sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
- DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun\n",
+ DEBUG2(printk("scsi%ld:%d:%d:%llu: %s: " "Data overrun\n",
ha->host_no,
cmd->device->channel, cmd->device->id,
cmd->device->lun, __func__));
@@ -259,7 +259,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
if (!scsi_status && (scsi_bufflen(cmd) - residual) <
cmd->underflow) {
DEBUG2(ql4_printk(KERN_INFO, ha,
- "scsi%ld:%d:%d:%d: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n",
+ "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n",
ha->host_no,
cmd->device->channel,
cmd->device->id,
@@ -291,7 +291,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
*/
DEBUG2(ql4_printk(KERN_INFO, ha,
- "scsi%ld:%d:%d:%d: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
ha->host_no,
cmd->device->channel,
cmd->device->id,
@@ -313,7 +313,7 @@ check_scsi_status:
case SCS_DEVICE_LOGGED_OUT:
case SCS_DEVICE_UNAVAILABLE:
- DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
+ DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%llu: SCS_DEVICE "
"state: 0x%x\n", ha->host_no,
cmd->device->channel, cmd->device->id,
cmd->device->lun, sts_entry->completionStatus));
@@ -333,7 +333,7 @@ check_scsi_status:
* SCSI Mid-Layer handles device queue full
*/
cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
- DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
+ DEBUG2(printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected "
"compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
" iResp=%02x\n", ha->host_no, cmd->device->id,
cmd->device->lun, __func__,
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 0a3312c6dd6d..fdfae79924ac 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1205,7 +1205,7 @@ int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE) {
status = QLA_ERROR;
- DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%d: abort task FAILED: "
+ DEBUG2(printk(KERN_WARNING "scsi%ld:%d:%llu: abort task FAILED: "
"mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n",
ha->host_no, cmd->device->id, cmd->device->lun, mbox_sts[0],
mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]));
@@ -1225,14 +1225,14 @@ int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb)
* are valid before calling this routine.
**/
int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
- int lun)
+ uint64_t lun)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
uint32_t scsi_lun[2];
int status = QLA_SUCCESS;
- DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
+ DEBUG2(printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no,
ddb_entry->fw_ddb_index, lun));
/*
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 320206376206..c5d9564d455c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -9223,20 +9223,20 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
{
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
unsigned int id = cmd->device->id;
- unsigned int lun = cmd->device->lun;
+ uint64_t lun = cmd->device->lun;
unsigned long flags;
struct srb *srb = NULL;
int ret = SUCCESS;
int wait = 0;
- ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Abort command issued cmd=%p, cdb=0x%x\n",
+ ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
ha->host_no, id, lun, cmd, cmd->cmnd[0]);
spin_lock_irqsave(&ha->hardware_lock, flags);
srb = (struct srb *) CMD_SP(cmd);
if (!srb) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Specified command has already completed.\n",
+ ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Specified command has already completed.\n",
ha->host_no, id, lun);
return SUCCESS;
}
@@ -9244,11 +9244,11 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
- DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
+ DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx failed.\n",
ha->host_no, id, lun));
ret = FAILED;
} else {
- DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx success.\n",
+ DEBUG3(printk("scsi%ld:%d:%llu: Abort_task mbx success.\n",
ha->host_no, id, lun));
wait = 1;
}
@@ -9258,14 +9258,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
/* Wait for command to complete */
if (wait) {
if (!qla4xxx_eh_wait_on_command(ha, cmd)) {
- DEBUG2(printk("scsi%ld:%d:%d: Abort handler timed out\n",
+ DEBUG2(printk("scsi%ld:%d:%llu: Abort handler timed out\n",
ha->host_no, id, lun));
ret = FAILED;
}
}
ql4_printk(KERN_INFO, ha,
- "scsi%ld:%d:%d: Abort command - %s\n",
+ "scsi%ld:%d:%llu: Abort command - %s\n",
ha->host_no, id, lun, (ret == SUCCESS) ? "succeeded" : "failed");
return ret;
@@ -9293,7 +9293,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
ret = FAILED;
ql4_printk(KERN_INFO, ha,
- "scsi%ld:%d:%d:%d: DEVICE RESET ISSUED.\n", ha->host_no,
+ "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no,
cmd->device->channel, cmd->device->id, cmd->device->lun);
DEBUG2(printk(KERN_INFO
@@ -9323,7 +9323,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
goto eh_dev_reset_done;
ql4_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): DEVICE RESET SUCCEEDED.\n",
+ "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n",
ha->host_no, cmd->device->channel, cmd->device->id,
cmd->device->lun);
@@ -9440,7 +9440,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
}
ql4_printk(KERN_INFO, ha,
- "scsi(%ld:%d:%d:%d): HOST RESET ISSUED.\n", ha->host_no,
+ "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no,
cmd->device->channel, cmd->device->id, cmd->device->lun);
if (qla4xxx_wait_for_hba_online(ha) != QLA_SUCCESS) {
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 13d628b56ff7..a22bb1b40ce2 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -171,8 +171,6 @@ static int qlogicfas_release(struct Scsi_Host *shost)
qlogicfas408_disable_ints(priv);
free_irq(shost->irq, shost);
}
- if (shost->dma_channel != 0xff)
- free_dma(shost->dma_channel);
if (shost->io_port && shost->n_io_port)
release_region(shost->io_port, shost->n_io_port);
scsi_host_put(shost);
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 6d48d30bed05..740ae495aa77 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -959,7 +959,7 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int
/* Temporary workaround until bug is found and fixed (one bug has been found
already, but fixing it makes things even worse) -jj */
int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64;
- host->can_queue = host->host_busy + num_free;
+ host->can_queue = atomic_read(&host->host_busy) + num_free;
host->sg_tablesize = QLOGICPTI_MAX_SG(num_free);
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 88d46fe6bf98..df3306019a7e 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -72,8 +72,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scsi.h>
-static void scsi_done(struct scsi_cmnd *cmd);
-
/*
* Definitions and constants.
*/
@@ -124,6 +122,8 @@ static const char *const scsi_device_types[] = {
"Bridge controller",
"Object storage ",
"Automation/Drive ",
+ "Security Manager ",
+ "Direct-Access-ZBC",
};
/**
@@ -235,7 +235,8 @@ fail:
* Description: allocate a struct scsi_cmd from host's slab, recycling from the
* host's free_list if necessary.
*/
-struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
+static struct scsi_cmnd *
+__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
{
struct scsi_cmnd *cmd = scsi_host_alloc_command(shost, gfp_mask);
@@ -265,7 +266,6 @@ struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *shost, gfp_t gfp_mask)
return cmd;
}
-EXPORT_SYMBOL_GPL(__scsi_get_command);
/**
* scsi_get_command - Allocate and setup a scsi command block
@@ -291,14 +291,13 @@ struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)
cmd->jiffies_at_alloc = jiffies;
return cmd;
}
-EXPORT_SYMBOL(scsi_get_command);
/**
* __scsi_put_command - Free a struct scsi_cmnd
* @shost: dev->host
* @cmd: Command to free
*/
-void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+static void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
unsigned long flags;
@@ -314,7 +313,6 @@ void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
if (likely(cmd != NULL))
scsi_host_free_command(shost, cmd);
}
-EXPORT_SYMBOL(__scsi_put_command);
/**
* scsi_put_command - Free a scsi command block
@@ -334,11 +332,10 @@ void scsi_put_command(struct scsi_cmnd *cmd)
list_del_init(&cmd->list);
spin_unlock_irqrestore(&cmd->device->list_lock, flags);
- cancel_delayed_work(&cmd->abort_work);
+ BUG_ON(delayed_work_pending(&cmd->abort_work));
__scsi_put_command(cmd->device->host, cmd);
}
-EXPORT_SYMBOL(scsi_put_command);
static struct scsi_host_cmd_pool *
scsi_find_host_cmd_pool(struct Scsi_Host *shost)
@@ -368,8 +365,8 @@ scsi_alloc_host_cmd_pool(struct Scsi_Host *shost)
if (!pool)
return NULL;
- pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name);
- pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name);
+ pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->proc_name);
+ pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->proc_name);
if (!pool->cmd_name || !pool->sense_name) {
scsi_free_host_cmd_pool(pool);
return NULL;
@@ -605,7 +602,7 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
if (level > 3)
scmd_printk(KERN_INFO, cmd,
"scsi host busy %d failed %d\n",
- cmd->device->host->host_busy,
+ atomic_read(&cmd->device->host->host_busy),
cmd->device->host->host_failed);
}
}
@@ -648,33 +645,24 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
* returns an immediate error upwards, and signals
* that the device is no longer present */
cmd->result = DID_NO_CONNECT << 16;
- scsi_done(cmd);
- /* return 0 (because the command has been processed) */
- goto out;
+ goto done;
}
/* Check to see if the scsi lld made this device blocked. */
if (unlikely(scsi_device_blocked(cmd->device))) {
- /*
+ /*
* in blocked state, the command is just put back on
* the device queue. The suspend state has already
* blocked the queue so future requests should not
* occur until the device transitions out of the
* suspend state.
*/
-
- scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
-
- SCSI_LOG_MLQUEUE(3, printk("queuecommand : device blocked \n"));
-
- /*
- * NOTE: rtn is still zero here because we don't need the
- * queue to be plugged on return (it's already stopped)
- */
- goto out;
+ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
+ "queuecommand : device blocked\n"));
+ return SCSI_MLQUEUE_DEVICE_BUSY;
}
- /*
+ /*
* If SCSI-2 or lower, store the LUN value in cmnd.
*/
if (cmd->device->scsi_level <= SCSI_2 &&
@@ -690,57 +678,36 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
* length exceeds what the host adapter can handle.
*/
if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
- SCSI_LOG_MLQUEUE(3,
- printk("queuecommand : command too long. "
+ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
+ "queuecommand : command too long. "
"cdb_size=%d host->max_cmd_len=%d\n",
cmd->cmd_len, cmd->device->host->max_cmd_len));
cmd->result = (DID_ABORT << 16);
-
- scsi_done(cmd);
- goto out;
+ goto done;
}
if (unlikely(host->shost_state == SHOST_DEL)) {
cmd->result = (DID_NO_CONNECT << 16);
- scsi_done(cmd);
- } else {
- trace_scsi_dispatch_cmd_start(cmd);
- cmd->scsi_done = scsi_done;
- rtn = host->hostt->queuecommand(host, cmd);
+ goto done;
+
}
+ trace_scsi_dispatch_cmd_start(cmd);
+ rtn = host->hostt->queuecommand(host, cmd);
if (rtn) {
trace_scsi_dispatch_cmd_error(cmd, rtn);
if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
rtn != SCSI_MLQUEUE_TARGET_BUSY)
rtn = SCSI_MLQUEUE_HOST_BUSY;
- scsi_queue_insert(cmd, rtn);
-
- SCSI_LOG_MLQUEUE(3,
- printk("queuecommand : request rejected\n"));
+ SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
+ "queuecommand : request rejected\n"));
}
- out:
- SCSI_LOG_MLQUEUE(3, printk("leaving scsi_dispatch_cmnd()\n"));
return rtn;
-}
-
-/**
- * scsi_done - Invoke completion on finished SCSI command.
- * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
- * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
- *
- * Description: This function is the mid-level's (SCSI Core) interrupt routine,
- * which regains ownership of the SCSI command (de facto) from a LLDD, and
- * calls blk_complete_request() for further processing.
- *
- * This function is interrupt context safe.
- */
-static void scsi_done(struct scsi_cmnd *cmd)
-{
- trace_scsi_dispatch_cmd_done(cmd);
- blk_complete_request(cmd->request);
+ done:
+ cmd->scsi_done(cmd);
+ return 0;
}
/**
@@ -761,17 +728,16 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
scsi_device_unbusy(sdev);
- /*
- * Clear the flags which say that the device/host is no longer
- * capable of accepting new commands. These are set in scsi_queue.c
- * for both the queue full condition on a device, and for a
- * host full condition on the host.
- *
- * XXX(hch): What about locking?
- */
- shost->host_blocked = 0;
- starget->target_blocked = 0;
- sdev->device_blocked = 0;
+ /*
+ * Clear the flags that say that the device/target/host is no longer
+ * capable of accepting new commands.
+ */
+ if (atomic_read(&shost->host_blocked))
+ atomic_set(&shost->host_blocked, 0);
+ if (atomic_read(&starget->target_blocked))
+ atomic_set(&starget->target_blocked, 0);
+ if (atomic_read(&sdev->device_blocked))
+ atomic_set(&sdev->device_blocked, 0);
/*
* If we have valid sense information, then some kind of recovery
@@ -801,7 +767,6 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
}
scsi_io_completion(cmd, good_bytes);
}
-EXPORT_SYMBOL(scsi_finish_command);
/**
* scsi_adjust_queue_depth - Let low level drivers change a device's queue depth
@@ -842,7 +807,7 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
* is more IO than the LLD's can_queue (so there are not enough
* tags) request_fn's host queue ready check will handle it.
*/
- if (!sdev->host->bqt) {
+ if (!shost_use_blk_mq(sdev->host) && !sdev->host->bqt) {
if (blk_queue_tagged(sdev->request_queue) &&
blk_queue_resize_tags(sdev->request_queue, tags) != 0)
goto out;
@@ -850,6 +815,10 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
sdev->queue_depth = tags;
switch (tagged) {
+ case 0:
+ sdev->ordered_tags = 0;
+ sdev->simple_tags = 0;
+ break;
case MSG_ORDERED_TAG:
sdev->ordered_tags = 1;
sdev->simple_tags = 1;
@@ -859,13 +828,11 @@ void scsi_adjust_queue_depth(struct scsi_device *sdev, int tagged, int tags)
sdev->simple_tags = 1;
break;
default:
+ sdev->ordered_tags = 0;
+ sdev->simple_tags = 0;
sdev_printk(KERN_WARNING, sdev,
"scsi_adjust_queue_depth, bad queue type, "
"disabled\n");
- case 0:
- sdev->ordered_tags = sdev->simple_tags = 0;
- sdev->queue_depth = tags;
- break;
}
out:
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
@@ -1291,7 +1258,7 @@ EXPORT_SYMBOL(__starget_for_each_device);
* really want to use scsi_device_lookup_by_target instead.
**/
struct scsi_device *__scsi_device_lookup_by_target(struct scsi_target *starget,
- uint lun)
+ u64 lun)
{
struct scsi_device *sdev;
@@ -1316,7 +1283,7 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
* needs to be released with scsi_device_put once you're done with it.
**/
struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
- uint lun)
+ u64 lun)
{
struct scsi_device *sdev;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
@@ -1349,7 +1316,7 @@ EXPORT_SYMBOL(scsi_device_lookup_by_target);
* really want to use scsi_device_lookup instead.
**/
struct scsi_device *__scsi_device_lookup(struct Scsi_Host *shost,
- uint channel, uint id, uint lun)
+ uint channel, uint id, u64 lun)
{
struct scsi_device *sdev;
@@ -1375,7 +1342,7 @@ EXPORT_SYMBOL(__scsi_device_lookup);
* needs to be released with scsi_device_put once you're done with it.
**/
struct scsi_device *scsi_device_lookup(struct Scsi_Host *shost,
- uint channel, uint id, uint lun)
+ uint channel, uint id, u64 lun)
{
struct scsi_device *sdev;
unsigned long flags;
@@ -1396,6 +1363,9 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
+bool scsi_use_blk_mq = false;
+module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
+
static int __init init_scsi(void)
{
int error;
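In scsi_finish_command() above the host/target/device blocked flags become atomics and are only cleared after a read shows them non-zero; a plausible reason is keeping the completion fast path from dirtying a shared cacheline when nothing was blocked. The check-then-clear idiom in isolation:

#include <linux/atomic.h>

/* Only write when the counter is actually set, so the common
 * already-unblocked case stays a pure read. */
static void clear_if_set_example(atomic_t *blocked)
{
	if (atomic_read(blocked))
		atomic_set(blocked, 0);
}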
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 1328a2621070..d19c0e3c7f48 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -42,6 +42,10 @@
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/hrtimer.h>
#include <net/checksum.h>
@@ -53,13 +57,16 @@
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "sd.h"
#include "scsi_logging.h"
-#define SCSI_DEBUG_VERSION "1.82"
-static const char * scsi_debug_version_date = "20100324";
+#define SCSI_DEBUG_VERSION "1.84"
+static const char *scsi_debug_version_date = "20140706";
+
+#define MY_NAME "scsi_debug"
/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0
@@ -72,7 +79,11 @@ static const char * scsi_debug_version_date = "20100324";
#define INVALID_COMMAND_OPCODE 0x20
#define INVALID_FIELD_IN_CDB 0x24
#define INVALID_FIELD_IN_PARAM_LIST 0x26
-#define POWERON_RESET 0x29
+#define UA_RESET_ASC 0x29
+#define UA_CHANGED_ASC 0x2a
+#define POWER_ON_RESET_ASCQ 0x0
+#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
+#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
#define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d
@@ -81,7 +92,6 @@ static const char * scsi_debug_version_date = "20100324";
/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO 0x3
-#define SDEBUG_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
/* Default values for driver parameters */
#define DEF_NUM_HOST 1
@@ -91,7 +101,7 @@ static const char * scsi_debug_version_date = "20100324";
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
#define DEF_ATO 1
-#define DEF_DELAY 1
+#define DEF_DELAY 1 /* if > 0 unit is a jiffy */
#define DEF_DEV_SIZE_MB 8
#define DEF_DIF 0
#define DEF_DIX 0
@@ -99,11 +109,13 @@ static const char * scsi_debug_version_date = "20100324";
#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
#define DEF_GUARD 0
+#define DEF_HOST_LOCK 0
#define DEF_LBPU 0
#define DEF_LBPWS 0
#define DEF_LBPWS10 0
#define DEF_LBPRZ 1
#define DEF_LOWEST_ALIGNED 0
+#define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
#define DEF_NO_LUN_0 0
#define DEF_NUM_PARTS 0
#define DEF_OPTS 0
@@ -113,6 +125,7 @@ static const char * scsi_debug_version_date = "20100324";
#define DEF_REMOVABLE false
#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
#define DEF_SECTOR_SIZE 512
+#define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
#define DEF_UNMAP_ALIGNMENT 0
#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
@@ -120,6 +133,7 @@ static const char * scsi_debug_version_date = "20100324";
#define DEF_VIRTUAL_GB 0
#define DEF_VPD_USE_HOSTNO 1
#define DEF_WRITESAME_LENGTH 0xFFFF
+#define DELAY_OVERRIDDEN -9999
/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE 1
@@ -130,7 +144,14 @@ static const char * scsi_debug_version_date = "20100324";
#define SCSI_DEBUG_OPT_DIF_ERR 32
#define SCSI_DEBUG_OPT_DIX_ERR 64
#define SCSI_DEBUG_OPT_MAC_TIMEOUT 128
-#define SCSI_DEBUG_OPT_SHORT_TRANSFER 256
+#define SCSI_DEBUG_OPT_SHORT_TRANSFER 0x100
+#define SCSI_DEBUG_OPT_Q_NOISE 0x200
+#define SCSI_DEBUG_OPT_ALL_TSF 0x400
+#define SCSI_DEBUG_OPT_RARE_TSF 0x800
+#define SCSI_DEBUG_OPT_N_WCE 0x1000
+#define SCSI_DEBUG_OPT_RESET_NOISE 0x2000
+#define SCSI_DEBUG_OPT_NO_CDB_NOISE 0x4000
+#define SCSI_DEBUG_OPT_ALL_NOISE (0x1 | 0x200 | 0x2000)
/* When "every_nth" > 0 then modulo "every_nth" commands:
* - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
* - a RECOVERED_ERROR is simulated on successful read and write
@@ -148,6 +169,19 @@ static const char * scsi_debug_version_date = "20100324";
* writing a new value (other than -1 or 1) to every_nth via sysfs).
*/
+/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
+ * priority order. In the subset implemented here lower numbers have higher
+ * priority. The UA numbers should be a sequence starting from 0 with
+ * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
+#define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
+#define SDEBUG_UA_BUS_RESET 1
+#define SDEBUG_UA_MODE_CHANGED 2
+#define SDEBUG_NUM_UAS 3
+
+/* for check_readiness() */
+#define UAS_ONLY 1
+#define UAS_TUR 0
+
/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
@@ -158,9 +192,19 @@ static const char * scsi_debug_version_date = "20100324";
#define SAM2_LUN_ADDRESS_METHOD 0
#define SAM2_WLUN_REPORT_LUNS 0xc101
-/* Can queue up to this number of commands. Typically commands that
- * that have a non-zero delay are queued. */
-#define SCSI_DEBUG_CANQUEUE 255
+/* SCSI_DEBUG_CANQUEUE is the maximum number of commands that can be queued
+ * (for response) at one time. Can be reduced by max_queue option. Command
+ * responses are not queued when delay=0 and ndelay=0. The per-device
+ * DEF_CMD_PER_LUN can be changed via sysfs:
+ * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth but cannot exceed
+ * SCSI_DEBUG_CANQUEUE. */
+#define SCSI_DEBUG_CANQUEUE_WORDS 9 /* a WORD is bits in a long */
+#define SCSI_DEBUG_CANQUEUE (SCSI_DEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
+#define DEF_CMD_PER_LUN 255
+
+#if DEF_CMD_PER_LUN > SCSI_DEBUG_CANQUEUE
+#warning "Expect DEF_CMD_PER_LUN <= SCSI_DEBUG_CANQUEUE"
+#endif
static int scsi_debug_add_host = DEF_NUM_HOST;
static int scsi_debug_ato = DEF_ATO;
@@ -175,6 +219,8 @@ static unsigned int scsi_debug_guard = DEF_GUARD;
static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
+static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
+static int scsi_debug_ndelay = DEF_NDELAY;
static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
static int scsi_debug_no_uld = 0;
static int scsi_debug_num_parts = DEF_NUM_PARTS;
@@ -198,8 +244,11 @@ static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
static bool scsi_debug_removable = DEF_REMOVABLE;
static bool scsi_debug_clustering;
+static bool scsi_debug_host_lock = DEF_HOST_LOCK;
-static int scsi_debug_cmnd_count = 0;
+static atomic_t sdebug_cmnd_count;
+static atomic_t sdebug_completions;
+static atomic_t sdebug_a_tsf; /* counter of 'almost' TSFs */
#define DEV_READONLY(TGT) (0)
@@ -214,24 +263,23 @@ static int sdebug_sectors_per; /* sectors per cylinder */
#define SDEBUG_MAX_PARTS 4
-#define SDEBUG_SENSE_LEN 32
-
#define SCSI_DEBUG_MAX_CMD_LEN 32
static unsigned int scsi_debug_lbp(void)
{
- return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
+ return ((0 == scsi_debug_fake_rw) &&
+ (scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10));
}
struct sdebug_dev_info {
struct list_head dev_list;
- unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
unsigned int channel;
unsigned int target;
- unsigned int lun;
+ u64 lun;
struct sdebug_host_info *sdbg_host;
- unsigned int wlun;
- char reset;
+ u64 wlun;
+ unsigned long uas_bm[1];
+ atomic_t num_in_q;
char stopped;
char used;
};
@@ -249,26 +297,33 @@ struct sdebug_host_info {
static LIST_HEAD(sdebug_host_list);
static DEFINE_SPINLOCK(sdebug_host_list_lock);
-typedef void (* done_funct_t) (struct scsi_cmnd *);
+
+struct sdebug_hrtimer { /* ... is derived from hrtimer */
+ struct hrtimer hrt; /* must be first element */
+ int qa_indx;
+};
struct sdebug_queued_cmd {
- int in_use;
- struct timer_list cmnd_timer;
- done_funct_t done_funct;
+ /* in_use flagged by a bit in queued_in_use_bm[] */
+ struct timer_list *cmnd_timerp;
+ struct tasklet_struct *tletp;
+ struct sdebug_hrtimer *sd_hrtp;
struct scsi_cmnd * a_cmnd;
- int scsi_result;
};
static struct sdebug_queued_cmd queued_arr[SCSI_DEBUG_CANQUEUE];
+static unsigned long queued_in_use_bm[SCSI_DEBUG_CANQUEUE_WORDS];
+
static unsigned char * fake_storep; /* ramdisk storage */
static struct sd_dif_tuple *dif_storep; /* protection info */
static void *map_storep; /* provisioning map */
static unsigned long map_size;
-static int num_aborts = 0;
-static int num_dev_resets = 0;
-static int num_bus_resets = 0;
-static int num_host_resets = 0;
+static int num_aborts;
+static int num_dev_resets;
+static int num_target_resets;
+static int num_bus_resets;
+static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;
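The sdebug_hrtimer wrapper introduced above keeps the struct hrtimer as its first member so the expiry callback, which only receives a struct hrtimer pointer, can get back to the per-command state. A minimal sketch with hypothetical names; container_of() is the general form and works even if the member is not first:

#include <linux/hrtimer.h>
#include <linux/kernel.h>	/* container_of(), pr_debug() */

struct sdebug_hrtimer_sketch {
	struct hrtimer hrt;	/* first member: &obj->hrt == (void *)obj */
	int qa_indx;
};

static enum hrtimer_restart sdebug_hrt_expiry_sketch(struct hrtimer *timer)
{
	struct sdebug_hrtimer_sketch *sd =
		container_of(timer, struct sdebug_hrtimer_sketch, hrt);

	/* ... complete the queued command indexed by sd->qa_indx ... */
	pr_debug("expired, qa_indx=%d\n", sd->qa_indx);
	return HRTIMER_NORESTART;
}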
@@ -276,7 +331,8 @@ static int dif_errors;
static DEFINE_SPINLOCK(queued_arr_lock);
static DEFINE_RWLOCK(atomic_rw);
-static char sdebug_proc_name[] = "scsi_debug";
+static char sdebug_proc_name[] = MY_NAME;
+static const char *my_name = MY_NAME;
static struct bus_type pseudo_lld_bus;
@@ -291,6 +347,12 @@ static const int check_condition_result =
static const int illegal_condition_result =
(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
+static const int device_qfull_result =
+ (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
+
+static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
+ 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
+ 0, 0, 0, 0};
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
0, 0, 0x2, 0x4b};
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
@@ -332,19 +394,24 @@ static void sdebug_max_tgts_luns(void)
spin_unlock(&sdebug_host_list_lock);
}
-static void mk_sense_buffer(struct sdebug_dev_info *devip, int key,
- int asc, int asq)
+static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{
unsigned char *sbuff;
- sbuff = devip->sense_buff;
- memset(sbuff, 0, SDEBUG_SENSE_LEN);
+ sbuff = scp->sense_buffer;
+ if (!sbuff) {
+ sdev_printk(KERN_ERR, scp->device,
+ "%s: sense_buffer is NULL\n", __func__);
+ return;
+ }
+ memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
scsi_build_sense_buffer(scsi_debug_dsense, sbuff, key, asc, asq);
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: [sense_key,asc,ascq]: "
- "[0x%x,0x%x,0x%x]\n", key, asc, asq);
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
+ my_name, key, asc, asq);
}
static void get_data_transfer_info(unsigned char *cmd,
@@ -409,29 +476,71 @@ static void get_data_transfer_info(unsigned char *cmd,
static int scsi_debug_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
- printk(KERN_INFO "scsi_debug: ioctl: cmd=0x%x\n", cmd);
+ if (0x1261 == cmd)
+ sdev_printk(KERN_INFO, dev,
+ "%s: BLKFLSBUF [0x1261]\n", __func__);
+ else if (0x5331 == cmd)
+ sdev_printk(KERN_INFO, dev,
+ "%s: CDROM_GET_CAPABILITY [0x5331]\n",
+ __func__);
+ else
+ sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
+ __func__, cmd);
}
return -EINVAL;
/* return -ENOTTY; // correct return but upsets fdisk */
}
-static int check_readiness(struct scsi_cmnd * SCpnt, int reset_only,
+static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
struct sdebug_dev_info * devip)
{
- if (devip->reset) {
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: Reporting Unit "
- "attention: power on reset\n");
- devip->reset = 0;
- mk_sense_buffer(devip, UNIT_ATTENTION, POWERON_RESET, 0);
+ int k;
+ bool debug = !!(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts);
+
+ k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
+ if (k != SDEBUG_NUM_UAS) {
+ const char *cp = NULL;
+
+ switch (k) {
+ case SDEBUG_UA_POR:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ UA_RESET_ASC, POWER_ON_RESET_ASCQ);
+ if (debug)
+ cp = "power on reset";
+ break;
+ case SDEBUG_UA_BUS_RESET:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ UA_RESET_ASC, BUS_RESET_ASCQ);
+ if (debug)
+ cp = "bus reset";
+ break;
+ case SDEBUG_UA_MODE_CHANGED:
+ mk_sense_buffer(SCpnt, UNIT_ATTENTION,
+ UA_CHANGED_ASC, MODE_CHANGED_ASCQ);
+ if (debug)
+ cp = "mode parameters changed";
+ break;
+ default:
+ pr_warn("%s: unexpected unit attention code=%d\n",
+ __func__, k);
+ if (debug)
+ cp = "unknown";
+ break;
+ }
+ clear_bit(k, devip->uas_bm);
+ if (debug)
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s reports: Unit attention: %s\n",
+ my_name, cp);
return check_condition_result;
}
- if ((0 == reset_only) && devip->stopped) {
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: Reporting Not "
- "ready: initializing command required\n");
- mk_sense_buffer(devip, NOT_READY, LOGICAL_UNIT_NOT_READY,
+ if ((UAS_TUR == uas_only) && devip->stopped) {
+ mk_sense_buffer(SCpnt, NOT_READY, LOGICAL_UNIT_NOT_READY,
0x2);
+ if (debug)
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s reports: Not ready: %s\n", my_name,
+ "initializing command required");
return check_condition_result;
}
return 0;
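The unit-attention handling added above is a small bitmap protocol: reset and mode-change paths set a bit in devip->uas_bm, and check_readiness() reports and clears the lowest-numbered (highest-priority) pending bit. A condensed sketch of that round trip using the kernel bitmap API and hypothetical names:

#include <linux/bitops.h>

#define EX_UA_POR		0	/* lower number = higher priority */
#define EX_UA_BUS_RESET		1
#define EX_UA_MODE_CHANGED	2
#define EX_NUM_UAS		3

static unsigned long ex_uas_bm[1];

static void ex_post_reset(void)
{
	set_bit(EX_UA_POR, ex_uas_bm);		/* queue a power-on/reset UA */
}

static int ex_report_one_ua(void)
{
	int k = find_first_bit(ex_uas_bm, EX_NUM_UAS);

	if (k == EX_NUM_UAS)			/* nothing pending */
		return -1;
	clear_bit(k, ex_uas_bm);		/* consume it */
	return k;				/* caller maps k to sense data */
}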
@@ -471,8 +580,9 @@ static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
static const char * inq_vendor_id = "Linux ";
static const char * inq_product_id = "scsi_debug ";
-static const char * inq_product_rev = "0004";
+static const char *inq_product_rev = "0184"; /* version less '.' */
+/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_evpd_83(unsigned char * arr, int port_group_id,
int target_dev_id, int dev_id_num,
const char * dev_id_str,
@@ -573,12 +683,14 @@ static unsigned char vpd84_data[] = {
0x22,0x22,0x22,0x0,0xbb,0x2,
};
+/* Software interface identification VPD page */
static int inquiry_evpd_84(unsigned char * arr)
{
memcpy(arr, vpd84_data, sizeof(vpd84_data));
return sizeof(vpd84_data);
}
+/* Management network addresses VPD page */
static int inquiry_evpd_85(unsigned char * arr)
{
int num = 0;
@@ -713,6 +825,7 @@ static unsigned char vpd89_data[] = {
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
+/* ATA Information VPD page */
static int inquiry_evpd_89(unsigned char * arr)
{
memcpy(arr, vpd89_data, sizeof(vpd89_data));
@@ -720,7 +833,6 @@ static int inquiry_evpd_89(unsigned char * arr)
}
-/* Block limits VPD page (SBC-3) */
static unsigned char vpdb0_data[] = {
/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
@@ -728,6 +840,7 @@ static unsigned char vpdb0_data[] = {
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
+/* Block limits VPD page (SBC-3) */
static int inquiry_evpd_b0(unsigned char * arr)
{
unsigned int gran;
@@ -811,7 +924,7 @@ static int inquiry_evpd_b2(unsigned char *arr)
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
-static int resp_inquiry(struct scsi_cmnd * scp, int target,
+static int resp_inquiry(struct scsi_cmnd *scp, int target,
struct sdebug_dev_info * devip)
{
unsigned char pq_pdt;
@@ -831,7 +944,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
pq_pdt = (scsi_debug_ptype & 0x1f);
arr[0] = pq_pdt;
if (0x2 & cmd[1]) { /* CMDDT bit set */
- mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
0);
kfree(arr);
return check_condition_result;
@@ -917,7 +1030,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
arr[3] = inquiry_evpd_b2(&arr[4]);
} else {
/* Illegal request, invalid field in cdb */
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
kfree(arr);
return check_condition_result;
@@ -963,15 +1076,13 @@ static int resp_requests(struct scsi_cmnd * scp,
{
unsigned char * sbuff;
unsigned char *cmd = (unsigned char *)scp->cmnd;
- unsigned char arr[SDEBUG_SENSE_LEN];
+ unsigned char arr[SCSI_SENSE_BUFFERSIZE];
int want_dsense;
int len = 18;
memset(arr, 0, sizeof(arr));
- if (devip->reset == 1)
- mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
want_dsense = !!(cmd[1] & 1) || scsi_debug_dsense;
- sbuff = devip->sense_buff;
+ sbuff = scp->sense_buffer;
if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
if (want_dsense) {
arr[0] = 0x72;
@@ -986,7 +1097,7 @@ static int resp_requests(struct scsi_cmnd * scp,
arr[13] = 0xff; /* TEST set and MRIE==6 */
}
} else {
- memcpy(arr, sbuff, SDEBUG_SENSE_LEN);
+ memcpy(arr, sbuff, SCSI_SENSE_BUFFERSIZE);
if ((cmd[1] & 1) && (! scsi_debug_dsense)) {
/* DESC bit set and sense_buff in fixed format */
memset(arr, 0, sizeof(arr));
@@ -997,7 +1108,7 @@ static int resp_requests(struct scsi_cmnd * scp,
len = 8;
}
}
- mk_sense_buffer(devip, 0, NO_ADDITIONAL_SENSE, 0);
+ mk_sense_buffer(scp, 0, NO_ADDITIONAL_SENSE, 0);
return fill_from_dev_buffer(scp, arr, len);
}
@@ -1007,11 +1118,12 @@ static int resp_start_stop(struct scsi_cmnd * scp,
unsigned char *cmd = (unsigned char *)scp->cmnd;
int power_cond, errsts, start;
- if ((errsts = check_readiness(scp, 1, devip)))
+ errsts = check_readiness(scp, UAS_ONLY, devip);
+ if (errsts)
return errsts;
power_cond = (cmd[4] & 0xf0) >> 4;
if (power_cond) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
0);
return check_condition_result;
}
@@ -1038,7 +1150,8 @@ static int resp_readcap(struct scsi_cmnd * scp,
unsigned int capac;
int errsts;
- if ((errsts = check_readiness(scp, 1, devip)))
+ errsts = check_readiness(scp, UAS_ONLY, devip);
+ if (errsts)
return errsts;
/* following just in case virtual_gb changed */
sdebug_capacity = get_sdebug_capacity();
@@ -1069,7 +1182,8 @@ static int resp_readcap16(struct scsi_cmnd * scp,
unsigned long long capac;
int errsts, k, alloc_len;
- if ((errsts = check_readiness(scp, 1, devip)))
+ errsts = check_readiness(scp, UAS_ONLY, devip);
+ if (errsts)
return errsts;
alloc_len = ((cmd[10] << 24) + (cmd[11] << 16) + (cmd[12] << 8)
+ cmd[13]);
@@ -1230,12 +1344,18 @@ static int resp_format_pg(unsigned char * p, int pcontrol, int target)
static int resp_caching_pg(unsigned char * p, int pcontrol, int target)
{ /* Caching page for mode_sense */
- unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
+ unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
+ if (SCSI_DEBUG_OPT_N_WCE & scsi_debug_opts)
+ caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
memcpy(p, caching_pg, sizeof(caching_pg));
if (1 == pcontrol)
- memset(p + 2, 0, sizeof(caching_pg) - 2);
+ memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
+ else if (2 == pcontrol)
+ memcpy(p, d_caching_pg, sizeof(d_caching_pg));
return sizeof(caching_pg);
}
@@ -1350,7 +1470,8 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
unsigned char *cmd = (unsigned char *)scp->cmnd;
- if ((errsts = check_readiness(scp, 1, devip)))
+ errsts = check_readiness(scp, UAS_ONLY, devip);
+ if (errsts)
return errsts;
dbd = !!(cmd[1] & 0x8);
pcontrol = (cmd[2] & 0xc0) >> 6;
@@ -1365,8 +1486,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
alloc_len = msense_6 ? cmd[4] : ((cmd[7] << 8) | cmd[8]);
memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
if (0x3 == pcontrol) { /* Saving values not supported */
- mk_sense_buffer(devip, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP,
- 0);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
return check_condition_result;
}
target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
@@ -1422,7 +1542,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
/* TODO: Control Extension page */
- mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
0);
return check_condition_result;
}
@@ -1449,7 +1569,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
break;
case 0x19: /* if spc==1 then sas phy, control+discover */
if ((subpcode > 0x2) && (subpcode < 0xff)) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
@@ -1482,14 +1602,14 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
}
len += resp_iec_m_pg(ap + len, pcontrol, target);
} else {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
offset += len;
break;
default:
- mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
0);
return check_condition_result;
}
@@ -1512,14 +1632,15 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
unsigned char *cmd = (unsigned char *)scp->cmnd;
- if ((errsts = check_readiness(scp, 1, devip)))
+ errsts = check_readiness(scp, UAS_ONLY, devip);
+ if (errsts)
return errsts;
memset(arr, 0, sizeof(arr));
pf = cmd[1] & 0x10;
sp = cmd[1] & 0x1;
param_len = mselect6 ? cmd[4] : ((cmd[7] << 8) + cmd[8]);
if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
@@ -1528,12 +1649,13 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
return (DID_ERROR << 16);
else if ((res < param_len) &&
(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
- printk(KERN_INFO "scsi_debug: mode_select: cdb indicated=%d, "
- " IO sent=%d bytes\n", param_len, res);
+ sdev_printk(KERN_INFO, scp->device,
+ "%s: cdb indicated=%d, IO sent=%d bytes\n",
+ __func__, param_len, res);
md_len = mselect6 ? (arr[0] + 1) : ((arr[0] << 8) + arr[1] + 2);
bd_len = mselect6 ? arr[3] : ((arr[6] << 8) + arr[7]);
if (md_len > 2) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_PARAM_LIST, 0);
return check_condition_result;
}
@@ -1541,7 +1663,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
mpage = arr[off] & 0x3f;
ps = !!(arr[off] & 0x80);
if (ps) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_PARAM_LIST, 0);
return check_condition_result;
}
@@ -1549,32 +1671,42 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
pg_len = spf ? ((arr[off + 2] << 8) + arr[off + 3] + 4) :
(arr[off + 1] + 2);
if ((pg_len + off) > param_len) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
PARAMETER_LIST_LENGTH_ERR, 0);
return check_condition_result;
}
switch (mpage) {
+ case 0x8: /* Caching Mode page */
+ if (caching_pg[1] == arr[off + 1]) {
+ memcpy(caching_pg + 2, arr + off + 2,
+ sizeof(caching_pg) - 2);
+ goto set_mode_changed_ua;
+ }
+ break;
case 0xa: /* Control Mode page */
if (ctrl_m_pg[1] == arr[off + 1]) {
memcpy(ctrl_m_pg + 2, arr + off + 2,
sizeof(ctrl_m_pg) - 2);
scsi_debug_dsense = !!(ctrl_m_pg[2] & 0x4);
- return 0;
+ goto set_mode_changed_ua;
}
break;
case 0x1c: /* Informational Exceptions Mode page */
if (iec_m_pg[1] == arr[off + 1]) {
memcpy(iec_m_pg + 2, arr + off + 2,
sizeof(iec_m_pg) - 2);
- return 0;
+ goto set_mode_changed_ua;
}
break;
default:
break;
}
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_PARAM_LIST, 0);
return check_condition_result;
+set_mode_changed_ua:
+ set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
+ return 0;
}
static int resp_temp_l_pg(unsigned char * arr)
@@ -1609,13 +1741,14 @@ static int resp_log_sense(struct scsi_cmnd * scp,
unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
unsigned char *cmd = (unsigned char *)scp->cmnd;
- if ((errsts = check_readiness(scp, 1, devip)))
+ errsts = check_readiness(scp, UAS_ONLY, devip);
+ if (errsts)
return errsts;
memset(arr, 0, sizeof(arr));
ppc = cmd[1] & 0x2;
sp = cmd[1] & 0x1;
if (ppc || sp) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
@@ -1640,7 +1773,7 @@ static int resp_log_sense(struct scsi_cmnd * scp,
arr[3] = resp_ie_l_pg(arr + 4);
break;
default:
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
@@ -1673,12 +1806,12 @@ static int resp_log_sense(struct scsi_cmnd * scp,
arr[3] = n - 4;
break;
default:
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
} else {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
@@ -1687,16 +1820,16 @@ static int resp_log_sense(struct scsi_cmnd * scp,
min(len, SDEBUG_MAX_INQ_ARR_SZ));
}
-static int check_device_access_params(struct sdebug_dev_info *devi,
+static int check_device_access_params(struct scsi_cmnd *scp,
unsigned long long lba, unsigned int num)
{
if (lba + num > sdebug_capacity) {
- mk_sense_buffer(devi, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, ADDR_OUT_OF_RANGE, 0);
return check_condition_result;
}
/* transfer length excessive (tie in to block limits VPD page) */
if (num > sdebug_store_sectors) {
- mk_sense_buffer(devi, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
return check_condition_result;
}
return 0;
@@ -1704,7 +1837,6 @@ static int check_device_access_params(struct sdebug_dev_info *devi,
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct scsi_cmnd *scmd,
- struct sdebug_dev_info *devi,
unsigned long long lba, unsigned int num, int write)
{
int ret;
@@ -1861,13 +1993,12 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
}
static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
- unsigned int num, struct sdebug_dev_info *devip,
- u32 ei_lba)
+ unsigned int num, u32 ei_lba)
{
unsigned long iflags;
int ret;
- ret = check_device_access_params(devip, lba, num);
+ ret = check_device_access_params(SCpnt, lba, num);
if (ret)
return ret;
@@ -1875,16 +2006,16 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
(lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
/* claim unrecoverable read error */
- mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
+ mk_sense_buffer(SCpnt, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
/* set info field and valid bit for fixed descriptor */
- if (0x70 == (devip->sense_buff[0] & 0x7f)) {
- devip->sense_buff[0] |= 0x80; /* Valid bit */
+ if (0x70 == (SCpnt->sense_buffer[0] & 0x7f)) {
+ SCpnt->sense_buffer[0] |= 0x80; /* Valid bit */
ret = (lba < OPT_MEDIUM_ERR_ADDR)
? OPT_MEDIUM_ERR_ADDR : (int)lba;
- devip->sense_buff[3] = (ret >> 24) & 0xff;
- devip->sense_buff[4] = (ret >> 16) & 0xff;
- devip->sense_buff[5] = (ret >> 8) & 0xff;
- devip->sense_buff[6] = ret & 0xff;
+ SCpnt->sense_buffer[3] = (ret >> 24) & 0xff;
+ SCpnt->sense_buffer[4] = (ret >> 16) & 0xff;
+ SCpnt->sense_buffer[5] = (ret >> 8) & 0xff;
+ SCpnt->sense_buffer[6] = ret & 0xff;
}
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
return check_condition_result;
@@ -1898,12 +2029,12 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
if (prot_ret) {
read_unlock_irqrestore(&atomic_rw, iflags);
- mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);
+ mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, prot_ret);
return illegal_condition_result;
}
}
- ret = do_device_access(SCpnt, devip, lba, num, 0);
+ ret = do_device_access(SCpnt, lba, num, 0);
read_unlock_irqrestore(&atomic_rw, iflags);
if (ret == -1)
return DID_ERROR << 16;
@@ -1915,22 +2046,23 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
void dump_sector(unsigned char *buf, int len)
{
- int i, j;
-
- printk(KERN_ERR ">>> Sector Dump <<<\n");
+ int i, j, n;
+ pr_err(">>> Sector Dump <<<\n");
for (i = 0 ; i < len ; i += 16) {
- printk(KERN_ERR "%04d: ", i);
+ char b[128];
- for (j = 0 ; j < 16 ; j++) {
+ for (j = 0, n = 0; j < 16; j++) {
unsigned char c = buf[i+j];
+
if (c >= 0x20 && c < 0x7e)
- printk(" %c ", buf[i+j]);
+ n += scnprintf(b + n, sizeof(b) - n,
+ " %c ", buf[i+j]);
else
- printk("%02x ", buf[i+j]);
+ n += scnprintf(b + n, sizeof(b) - n,
+ "%02x ", buf[i+j]);
}
-
- printk("\n");
+ pr_err("%04d: %s\n", i, b);
}
}
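/* Editorial aside, not part of the patch: a minimal alternative sketch.
 * It relies only on print_hex_dump() from <linux/printk.h>, which likewise
 * buffers a whole 16-byte row before printing and emits hex plus ASCII
 * columns, so it achieves a similar effect to the hand-rolled scnprintf()
 * loop above. The function name is hypothetical. */
static void __maybe_unused dump_sector_alt(unsigned char *buf, int len)
{
	pr_err(">>> Sector Dump <<<\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
		       buf, len, true);
}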
@@ -2092,13 +2224,12 @@ static void unmap_region(sector_t lba, unsigned int len)
}
static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
- unsigned int num, struct sdebug_dev_info *devip,
- u32 ei_lba)
+ unsigned int num, u32 ei_lba)
{
unsigned long iflags;
int ret;
- ret = check_device_access_params(devip, lba, num);
+ ret = check_device_access_params(SCpnt, lba, num);
if (ret)
return ret;
@@ -2110,12 +2241,13 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
if (prot_ret) {
write_unlock_irqrestore(&atomic_rw, iflags);
- mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10,
+ prot_ret);
return illegal_condition_result;
}
}
- ret = do_device_access(SCpnt, devip, lba, num, 1);
+ ret = do_device_access(SCpnt, lba, num, 1);
if (scsi_debug_lbp())
map_region(lba, num);
write_unlock_irqrestore(&atomic_rw, iflags);
@@ -2123,26 +2255,26 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,
return (DID_ERROR << 16);
else if ((ret < (num * scsi_debug_sector_size)) &&
(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
- printk(KERN_INFO "scsi_debug: write: cdb indicated=%u, "
- " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, num * scsi_debug_sector_size, ret);
return 0;
}
static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
- unsigned int num, struct sdebug_dev_info *devip,
- u32 ei_lba, unsigned int unmap)
+ unsigned int num, u32 ei_lba, unsigned int unmap)
{
unsigned long iflags;
unsigned long long i;
int ret;
- ret = check_device_access_params(devip, lba, num);
+ ret = check_device_access_params(scmd, lba, num);
if (ret)
return ret;
if (num > scsi_debug_write_same_length) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
+ mk_sense_buffer(scmd, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
0);
return check_condition_result;
}
@@ -2164,8 +2296,10 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
return (DID_ERROR << 16);
} else if ((ret < (num * scsi_debug_sector_size)) &&
(SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
- printk(KERN_INFO "scsi_debug: write same: cdb indicated=%u, "
- " IO sent=%d bytes\n", num * scsi_debug_sector_size, ret);
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
+ my_name, "write same",
+ num * scsi_debug_sector_size, ret);
/* Copy first sector to remaining blocks */
for (i = 1 ; i < num ; i++)
@@ -2195,7 +2329,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
int ret;
unsigned long iflags;
- ret = check_readiness(scmd, 1, devip);
+ ret = check_readiness(scmd, UAS_ONLY, devip);
if (ret)
return ret;
@@ -2221,7 +2355,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)
unsigned long long lba = get_unaligned_be64(&desc[i].lba);
unsigned int num = get_unaligned_be32(&desc[i].blocks);
- ret = check_device_access_params(devip, lba, num);
+ ret = check_device_access_params(scmd, lba, num);
if (ret)
goto out;
@@ -2247,7 +2381,7 @@ static int resp_get_lba_status(struct scsi_cmnd * scmd,
unsigned char arr[SDEBUG_GET_LBA_STATUS_LEN];
int ret;
- ret = check_readiness(scmd, 1, devip);
+ ret = check_readiness(scmd, UAS_ONLY, devip);
if (ret)
return ret;
@@ -2257,7 +2391,7 @@ static int resp_get_lba_status(struct scsi_cmnd * scmd,
if (alloc_len < 24)
return 0;
- ret = check_device_access_params(devip, lba, 1);
+ ret = check_device_access_params(scmd, lba, 1);
if (ret)
return ret;
@@ -2278,7 +2412,8 @@ static int resp_report_luns(struct scsi_cmnd * scp,
struct sdebug_dev_info * devip)
{
unsigned int alloc_len;
- int lun_cnt, i, upper, num, n, wlun, lun;
+ int lun_cnt, i, upper, num, n;
+ u64 wlun, lun;
unsigned char *cmd = (unsigned char *)scp->cmnd;
int select_report = (int)cmd[2];
struct scsi_lun *one_lun;
@@ -2287,7 +2422,7 @@ static int resp_report_luns(struct scsi_cmnd * scp,
alloc_len = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
if ((alloc_len < 4) || (select_report > 2)) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
+ mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
0);
return check_condition_result;
}
@@ -2341,7 +2476,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
/* better not to use temporary buffer. */
buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC);
if (!buf) {
- mk_sense_buffer(devip, NOT_READY,
+ mk_sense_buffer(scp, NOT_READY,
LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
return check_condition_result;
}
@@ -2365,34 +2500,125 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
return 0;
}
-/* When timer goes off this function is called. */
-static void timer_intr_handler(unsigned long indx)
+/* When timer or tasklet goes off this function is called. */
+static void sdebug_q_cmd_complete(unsigned long indx)
{
- struct sdebug_queued_cmd * sqcp;
+ int qa_indx;
+ int retiring = 0;
unsigned long iflags;
+ struct sdebug_queued_cmd *sqcp;
+ struct scsi_cmnd *scp;
+ struct sdebug_dev_info *devip;
- if (indx >= scsi_debug_max_queue) {
- printk(KERN_ERR "scsi_debug:timer_intr_handler: indx too "
- "large\n");
+ atomic_inc(&sdebug_completions);
+ qa_indx = indx;
+ if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
+ pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
return;
}
spin_lock_irqsave(&queued_arr_lock, iflags);
- sqcp = &queued_arr[(int)indx];
- if (! sqcp->in_use) {
- printk(KERN_ERR "scsi_debug:timer_intr_handler: Unexpected "
- "interrupt\n");
+ sqcp = &queued_arr[qa_indx];
+ scp = sqcp->a_cmnd;
+ if (NULL == scp) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: scp is NULL\n", __func__);
+ return;
+ }
+ devip = (struct sdebug_dev_info *)scp->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ else
+ pr_err("%s: devip=NULL\n", __func__);
+ if (atomic_read(&retired_max_queue) > 0)
+ retiring = 1;
+
+ sqcp->a_cmnd = NULL;
+ if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: Unexpected completion\n", __func__);
return;
}
- sqcp->in_use = 0;
- if (sqcp->done_funct) {
- sqcp->a_cmnd->result = sqcp->scsi_result;
- sqcp->done_funct(sqcp->a_cmnd); /* callback to mid level */
+
+ if (unlikely(retiring)) { /* user has reduced max_queue */
+ int k, retval;
+
+ retval = atomic_read(&retired_max_queue);
+ if (qa_indx >= retval) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: index %d too large\n", __func__, retval);
+ return;
+ }
+ k = find_last_bit(queued_in_use_bm, retval);
+ if ((k < scsi_debug_max_queue) || (k == retval))
+ atomic_set(&retired_max_queue, 0);
+ else
+ atomic_set(&retired_max_queue, k + 1);
}
- sqcp->done_funct = NULL;
spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ scp->scsi_done(scp); /* callback to mid level */
}
+/* When high resolution timer goes off this function is called. */
+static enum hrtimer_restart
+sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
+{
+ int qa_indx;
+ int retiring = 0;
+ unsigned long iflags;
+ struct sdebug_hrtimer *sd_hrtp = (struct sdebug_hrtimer *)timer;
+ struct sdebug_queued_cmd *sqcp;
+ struct scsi_cmnd *scp;
+ struct sdebug_dev_info *devip;
+
+ atomic_inc(&sdebug_completions);
+ qa_indx = sd_hrtp->qa_indx;
+ if ((qa_indx < 0) || (qa_indx >= SCSI_DEBUG_CANQUEUE)) {
+ pr_err("%s: wild qa_indx=%d\n", __func__, qa_indx);
+ goto the_end;
+ }
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ sqcp = &queued_arr[qa_indx];
+ scp = sqcp->a_cmnd;
+ if (NULL == scp) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: scp is NULL\n", __func__);
+ goto the_end;
+ }
+ devip = (struct sdebug_dev_info *)scp->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ else
+ pr_err("%s: devip=NULL\n", __func__);
+ if (atomic_read(&retired_max_queue) > 0)
+ retiring = 1;
+
+ sqcp->a_cmnd = NULL;
+ if (!test_and_clear_bit(qa_indx, queued_in_use_bm)) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: Unexpected completion\n", __func__);
+ goto the_end;
+ }
+
+ if (unlikely(retiring)) { /* user has reduced max_queue */
+ int k, retval;
+
+ retval = atomic_read(&retired_max_queue);
+ if (qa_indx >= retval) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ pr_err("%s: index %d too large\n", __func__, retval);
+ goto the_end;
+ }
+ k = find_last_bit(queued_in_use_bm, retval);
+ if ((k < scsi_debug_max_queue) || (k == retval))
+ atomic_set(&retired_max_queue, 0);
+ else
+ atomic_set(&retired_max_queue, k + 1);
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ scp->scsi_done(scp); /* callback to mid level */
+the_end:
+ return HRTIMER_NORESTART;
+}
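/* Editorial note (assumption; the definition is not shown in these hunks):
 * the cast from struct hrtimer * to struct sdebug_hrtimer * above is only
 * safe if the hrtimer is the first member, i.e. a layout along these lines:
 *
 *	struct sdebug_hrtimer {
 *		struct hrtimer hrt;	// must stay the first member
 *		int qa_indx;		// index into queued_arr[]
 *	};
 *
 * Using container_of(timer, struct sdebug_hrtimer, hrt) would express the
 * same recovery without depending on member order. */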
static struct sdebug_dev_info *
sdebug_device_create(struct sdebug_host_info *sdbg_host, gfp_t flags)
@@ -2418,7 +2644,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
return devip;
sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
if (!sdbg_host) {
- printk(KERN_ERR "Host info NULL\n");
+ pr_err("%s: Host info NULL\n", __func__);
return NULL;
}
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
@@ -2444,15 +2670,9 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
open_devip->target = sdev->id;
open_devip->lun = sdev->lun;
open_devip->sdbg_host = sdbg_host;
- open_devip->reset = 1;
+ atomic_set(&open_devip->num_in_q, 0);
+ set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
open_devip->used = 1;
- memset(open_devip->sense_buff, 0, SDEBUG_SENSE_LEN);
- if (scsi_debug_dsense)
- open_devip->sense_buff[0] = 0x72;
- else {
- open_devip->sense_buff[0] = 0x70;
- open_devip->sense_buff[7] = 0xa;
- }
if (sdev->lun == SAM2_WLUN_REPORT_LUNS)
open_devip->wlun = SAM2_WLUN_REPORT_LUNS & 0xff;
@@ -2462,7 +2682,7 @@ static struct sdebug_dev_info * devInfoReg(struct scsi_device * sdev)
static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
+ printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
return 0;
@@ -2473,7 +2693,7 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
struct sdebug_dev_info *devip;
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %u>\n",
+ printk(KERN_INFO "scsi_debug: slave_configure <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (sdp->host->max_cmd_len != SCSI_DEBUG_MAX_CMD_LEN)
sdp->host->max_cmd_len = SCSI_DEBUG_MAX_CMD_LEN;
@@ -2481,10 +2701,11 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
if (NULL == devip)
return 1; /* no resources, will be marked offline */
sdp->hostdata = devip;
+ sdp->tagged_supported = 1;
if (sdp->host->cmd_per_lun)
- scsi_adjust_queue_depth(sdp, SDEBUG_TAGGED_QUEUING,
- sdp->host->cmd_per_lun);
- blk_queue_max_segment_size(sdp->request_queue, 256 * 1024);
+ scsi_adjust_queue_depth(sdp, DEF_TAGGED_QUEUING,
+ DEF_CMD_PER_LUN);
+ blk_queue_max_segment_size(sdp->request_queue, -1U);
if (scsi_debug_no_uld)
sdp->no_uld_attach = 1;
return 0;
@@ -2496,7 +2717,7 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
(struct sdebug_dev_info *)sdp->hostdata;
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %u>\n",
+ printk(KERN_INFO "scsi_debug: slave_destroy <%u %u %u %llu>\n",
sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
if (devip) {
/* make this slot available for re-use */
@@ -2505,150 +2726,230 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-/* Returns 1 if found 'cmnd' and deleted its timer. else returns 0 */
+/* Returns 1 if cmnd found (deletes its timer or tasklet), else returns 0 */
static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
{
unsigned long iflags;
- int k;
+ int k, qmax, r_qmax;
struct sdebug_queued_cmd *sqcp;
+ struct sdebug_dev_info *devip;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < scsi_debug_max_queue; ++k) {
- sqcp = &queued_arr[k];
- if (sqcp->in_use && (cmnd == sqcp->a_cmnd)) {
- del_timer_sync(&sqcp->cmnd_timer);
- sqcp->in_use = 0;
- sqcp->a_cmnd = NULL;
- break;
+ qmax = scsi_debug_max_queue;
+ r_qmax = atomic_read(&retired_max_queue);
+ if (r_qmax > qmax)
+ qmax = r_qmax;
+ for (k = 0; k < qmax; ++k) {
+ if (test_bit(k, queued_in_use_bm)) {
+ sqcp = &queued_arr[k];
+ if (cmnd == sqcp->a_cmnd) {
+ if (scsi_debug_ndelay > 0) {
+ if (sqcp->sd_hrtp)
+ hrtimer_cancel(
+ &sqcp->sd_hrtp->hrt);
+ } else if (scsi_debug_delay > 0) {
+ if (sqcp->cmnd_timerp)
+ del_timer_sync(
+ sqcp->cmnd_timerp);
+ } else if (scsi_debug_delay < 0) {
+ if (sqcp->tletp)
+ tasklet_kill(sqcp->tletp);
+ }
+ __clear_bit(k, queued_in_use_bm);
+ devip = (struct sdebug_dev_info *)
+ cmnd->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ sqcp->a_cmnd = NULL;
+ break;
+ }
}
}
spin_unlock_irqrestore(&queued_arr_lock, iflags);
- return (k < scsi_debug_max_queue) ? 1 : 0;
+ return (k < qmax) ? 1 : 0;
}
-/* Deletes (stops) timers of all queued commands */
+/* Deletes (stops) timers or tasklets of all queued commands */
static void stop_all_queued(void)
{
unsigned long iflags;
int k;
struct sdebug_queued_cmd *sqcp;
+ struct sdebug_dev_info *devip;
spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < scsi_debug_max_queue; ++k) {
- sqcp = &queued_arr[k];
- if (sqcp->in_use && sqcp->a_cmnd) {
- del_timer_sync(&sqcp->cmnd_timer);
- sqcp->in_use = 0;
- sqcp->a_cmnd = NULL;
+ for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ if (test_bit(k, queued_in_use_bm)) {
+ sqcp = &queued_arr[k];
+ if (sqcp->a_cmnd) {
+ if (scsi_debug_ndelay > 0) {
+ if (sqcp->sd_hrtp)
+ hrtimer_cancel(
+ &sqcp->sd_hrtp->hrt);
+ } else if (scsi_debug_delay > 0) {
+ if (sqcp->cmnd_timerp)
+ del_timer_sync(
+ sqcp->cmnd_timerp);
+ } else if (scsi_debug_delay < 0) {
+ if (sqcp->tletp)
+ tasklet_kill(sqcp->tletp);
+ }
+ __clear_bit(k, queued_in_use_bm);
+ devip = (struct sdebug_dev_info *)
+ sqcp->a_cmnd->device->hostdata;
+ if (devip)
+ atomic_dec(&devip->num_in_q);
+ sqcp->a_cmnd = NULL;
+ }
}
}
spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
-static int scsi_debug_abort(struct scsi_cmnd * SCpnt)
+/* Free queued command memory on heap */
+static void free_all_queued(void)
{
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: abort\n");
- ++num_aborts;
- stop_queued_cmnd(SCpnt);
- return SUCCESS;
+ unsigned long iflags;
+ int k;
+ struct sdebug_queued_cmd *sqcp;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ for (k = 0; k < SCSI_DEBUG_CANQUEUE; ++k) {
+ sqcp = &queued_arr[k];
+ kfree(sqcp->cmnd_timerp);
+ sqcp->cmnd_timerp = NULL;
+ kfree(sqcp->tletp);
+ sqcp->tletp = NULL;
+ kfree(sqcp->sd_hrtp);
+ sqcp->sd_hrtp = NULL;
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
-static int scsi_debug_biosparam(struct scsi_device *sdev,
- struct block_device * bdev, sector_t capacity, int *info)
+static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
- int res;
- unsigned char *buf;
-
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: biosparam\n");
- buf = scsi_bios_ptable(bdev);
- if (buf) {
- res = scsi_partsize(buf, capacity,
- &info[2], &info[0], &info[1]);
- kfree(buf);
- if (! res)
- return res;
- }
- info[0] = sdebug_heads;
- info[1] = sdebug_sectors_per;
- info[2] = sdebug_cylinders_per;
- return 0;
+ ++num_aborts;
+ if (SCpnt) {
+ if (SCpnt->device &&
+ (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, SCpnt->device, "%s\n",
+ __func__);
+ stop_queued_cmnd(SCpnt);
+ }
+ return SUCCESS;
}
static int scsi_debug_device_reset(struct scsi_cmnd * SCpnt)
{
struct sdebug_dev_info * devip;
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: device_reset\n");
++num_dev_resets;
- if (SCpnt) {
- devip = devInfoReg(SCpnt->device);
+ if (SCpnt && SCpnt->device) {
+ struct scsi_device *sdp = SCpnt->device;
+
+ if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+ devip = devInfoReg(sdp);
if (devip)
- devip->reset = 1;
+ set_bit(SDEBUG_UA_POR, devip->uas_bm);
+ }
+ return SUCCESS;
+}
+
+static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
+{
+ struct sdebug_host_info *sdbg_host;
+ struct sdebug_dev_info *devip;
+ struct scsi_device *sdp;
+ struct Scsi_Host *hp;
+ int k = 0;
+
+ ++num_target_resets;
+ if (!SCpnt)
+ goto lie;
+ sdp = SCpnt->device;
+ if (!sdp)
+ goto lie;
+ if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+ hp = sdp->host;
+ if (!hp)
+ goto lie;
+ sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
+ if (sdbg_host) {
+ list_for_each_entry(devip,
+ &sdbg_host->dev_info_list,
+ dev_list)
+ if (devip->target == sdp->id) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
+ }
}
+ if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp,
+ "%s: %d device(s) found in target\n", __func__, k);
+lie:
return SUCCESS;
}
static int scsi_debug_bus_reset(struct scsi_cmnd * SCpnt)
{
struct sdebug_host_info *sdbg_host;
- struct sdebug_dev_info * dev_info;
+ struct sdebug_dev_info *devip;
struct scsi_device * sdp;
struct Scsi_Host * hp;
+ int k = 0;
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: bus_reset\n");
++num_bus_resets;
- if (SCpnt && ((sdp = SCpnt->device)) && ((hp = sdp->host))) {
+ if (!(SCpnt && SCpnt->device))
+ goto lie;
+ sdp = SCpnt->device;
+ if (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
+ hp = sdp->host;
+ if (hp) {
sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
if (sdbg_host) {
- list_for_each_entry(dev_info,
+ list_for_each_entry(devip,
&sdbg_host->dev_info_list,
- dev_list)
- dev_info->reset = 1;
+ dev_list) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
+ }
}
}
+ if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp,
+ "%s: %d device(s) found in host\n", __func__, k);
+lie:
return SUCCESS;
}
static int scsi_debug_host_reset(struct scsi_cmnd * SCpnt)
{
struct sdebug_host_info * sdbg_host;
- struct sdebug_dev_info * dev_info;
+ struct sdebug_dev_info *devip;
+ int k = 0;
- if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: host_reset\n");
++num_host_resets;
+ if ((SCpnt->device) && (SCSI_DEBUG_OPT_ALL_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
spin_lock(&sdebug_host_list_lock);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
- list_for_each_entry(dev_info, &sdbg_host->dev_info_list,
- dev_list)
- dev_info->reset = 1;
+ list_for_each_entry(devip, &sdbg_host->dev_info_list,
+ dev_list) {
+ set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
+ ++k;
+ }
}
spin_unlock(&sdebug_host_list_lock);
stop_all_queued();
+ if (SCSI_DEBUG_OPT_RESET_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s: %d device(s) found\n", __func__, k);
return SUCCESS;
}
-/* Initializes timers in queued array */
-static void __init init_all_queued(void)
-{
- unsigned long iflags;
- int k;
- struct sdebug_queued_cmd * sqcp;
-
- spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < scsi_debug_max_queue; ++k) {
- sqcp = &queued_arr[k];
- init_timer(&sqcp->cmnd_timer);
- sqcp->in_use = 0;
- sqcp->a_cmnd = NULL;
- }
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
-}
-
static void __init sdebug_build_parts(unsigned char *ramp,
unsigned long store_size)
{
@@ -2662,8 +2963,8 @@ static void __init sdebug_build_parts(unsigned char *ramp,
return;
if (scsi_debug_num_parts > SDEBUG_MAX_PARTS) {
scsi_debug_num_parts = SDEBUG_MAX_PARTS;
- printk(KERN_WARNING "scsi_debug:build_parts: reducing "
- "partitions to %d\n", SDEBUG_MAX_PARTS);
+ pr_warn("%s: reducing partitions to %d\n", __func__,
+ SDEBUG_MAX_PARTS);
}
num_sectors = (int)sdebug_store_sectors;
sectors_per_part = (num_sectors - sdebug_sectors_per)
@@ -2700,62 +3001,130 @@ static void __init sdebug_build_parts(unsigned char *ramp,
}
}
-static int schedule_resp(struct scsi_cmnd * cmnd,
- struct sdebug_dev_info * devip,
- done_funct_t done, int scsi_result, int delta_jiff)
+static int
+schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
+ int scsi_result, int delta_jiff)
{
- if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmnd) {
- if (scsi_result) {
- struct scsi_device * sdp = cmnd->device;
+ unsigned long iflags;
+ int k, num_in_q, tsf, qdepth, inject;
+ struct sdebug_queued_cmd *sqcp = NULL;
+ struct scsi_device *sdp = cmnd->device;
+
+ if (NULL == cmnd || NULL == devip) {
+ pr_warn("%s: called with NULL cmnd or devip pointer\n",
+ __func__);
+ /* no particularly good error to report back */
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
+ __func__, scsi_result);
+ if (delta_jiff == 0) {
+ /* using same thread to call back mid-layer */
+ cmnd->result = scsi_result;
+ cmnd->scsi_done(cmnd);
+ return 0;
+ }
- printk(KERN_INFO "scsi_debug: <%u %u %u %u> "
- "non-zero result=0x%x\n", sdp->host->host_no,
- sdp->channel, sdp->id, sdp->lun, scsi_result);
+ /* deferred response cases */
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ num_in_q = atomic_read(&devip->num_in_q);
+ qdepth = cmnd->device->queue_depth;
+ k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
+ tsf = 0;
+ inject = 0;
+ if ((qdepth > 0) && (num_in_q >= qdepth))
+ tsf = 1;
+ else if ((scsi_debug_every_nth != 0) &&
+ (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts)) {
+ if ((num_in_q == (qdepth - 1)) &&
+ (atomic_inc_return(&sdebug_a_tsf) >=
+ abs(scsi_debug_every_nth))) {
+ atomic_set(&sdebug_a_tsf, 0);
+ inject = 1;
+ tsf = 1;
}
}
- if (cmnd && devip) {
- /* simulate autosense by this driver */
- if (SAM_STAT_CHECK_CONDITION == (scsi_result & 0xff))
- memcpy(cmnd->sense_buffer, devip->sense_buff,
- (SCSI_SENSE_BUFFERSIZE > SDEBUG_SENSE_LEN) ?
- SDEBUG_SENSE_LEN : SCSI_SENSE_BUFFERSIZE);
- }
- if (delta_jiff <= 0) {
- if (cmnd)
- cmnd->result = scsi_result;
- if (done)
- done(cmnd);
- return 0;
- } else {
- unsigned long iflags;
- int k;
- struct sdebug_queued_cmd * sqcp = NULL;
- spin_lock_irqsave(&queued_arr_lock, iflags);
- for (k = 0; k < scsi_debug_max_queue; ++k) {
- sqcp = &queued_arr[k];
- if (! sqcp->in_use)
- break;
+ /* if (tsf) simulate device reporting SCSI status of TASK SET FULL.
+ * Might override existing CHECK CONDITION. */
+ if (tsf)
+ scsi_result = device_qfull_result;
+ if (k >= scsi_debug_max_queue) {
+ if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
+ tsf = 1;
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, sdp,
+ "%s: num_in_q=%d, bypass q, %s%s\n",
+ __func__, num_in_q,
+ (inject ? "<inject> " : ""),
+ (tsf ? "status: TASK SET FULL" :
+ "report: host busy"));
+ if (tsf) {
+ /* queued_arr full so respond in same thread */
+ cmnd->result = scsi_result;
+ cmnd->scsi_done(cmnd);
+ /* As scsi_done() is called "inline" must return 0 */
+ return 0;
+ } else
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ __set_bit(k, queued_in_use_bm);
+ atomic_inc(&devip->num_in_q);
+ sqcp = &queued_arr[k];
+ sqcp->a_cmnd = cmnd;
+ cmnd->result = scsi_result;
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ if (delta_jiff > 0) {
+ if (NULL == sqcp->cmnd_timerp) {
+ sqcp->cmnd_timerp = kmalloc(sizeof(struct timer_list),
+ GFP_ATOMIC);
+ if (NULL == sqcp->cmnd_timerp)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ init_timer(sqcp->cmnd_timerp);
}
- if (k >= scsi_debug_max_queue) {
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- printk(KERN_WARNING "scsi_debug: can_queue exceeded\n");
- return 1; /* report busy to mid level */
+ sqcp->cmnd_timerp->function = sdebug_q_cmd_complete;
+ sqcp->cmnd_timerp->data = k;
+ sqcp->cmnd_timerp->expires = get_jiffies_64() + delta_jiff;
+ add_timer(sqcp->cmnd_timerp);
+ } else if (scsi_debug_ndelay > 0) {
+ ktime_t kt = ktime_set(0, scsi_debug_ndelay);
+ struct sdebug_hrtimer *sd_hp = sqcp->sd_hrtp;
+
+ if (NULL == sd_hp) {
+ sd_hp = kmalloc(sizeof(*sd_hp), GFP_ATOMIC);
+ if (NULL == sd_hp)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ sqcp->sd_hrtp = sd_hp;
+ hrtimer_init(&sd_hp->hrt, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ sd_hp->hrt.function = sdebug_q_cmd_hrt_complete;
+ sd_hp->qa_indx = k;
}
- sqcp->in_use = 1;
- sqcp->a_cmnd = cmnd;
- sqcp->scsi_result = scsi_result;
- sqcp->done_funct = done;
- sqcp->cmnd_timer.function = timer_intr_handler;
- sqcp->cmnd_timer.data = k;
- sqcp->cmnd_timer.expires = jiffies + delta_jiff;
- add_timer(&sqcp->cmnd_timer);
- spin_unlock_irqrestore(&queued_arr_lock, iflags);
- if (cmnd)
- cmnd->result = 0;
- return 0;
+ hrtimer_start(&sd_hp->hrt, kt, HRTIMER_MODE_REL);
+ } else { /* delay < 0 */
+ if (NULL == sqcp->tletp) {
+ sqcp->tletp = kmalloc(sizeof(*sqcp->tletp),
+ GFP_ATOMIC);
+ if (NULL == sqcp->tletp)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ tasklet_init(sqcp->tletp,
+ sdebug_q_cmd_complete, k);
+ }
+ if (-1 == delta_jiff)
+ tasklet_hi_schedule(sqcp->tletp);
+ else
+ tasklet_schedule(sqcp->tletp);
}
+ if (tsf && (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts))
+ sdev_printk(KERN_INFO, sdp,
+ "%s: num_in_q=%d +1, %s%s\n", __func__,
+ num_in_q, (inject ? "<inject> " : ""),
+ "status: TASK SET FULL");
+ return 0;
}
+
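/* Editorial sketch, not in the patch: a summary of the deferral mechanism
 * schedule_resp() above selects from the (delay, ndelay) pair; the delay
 * and ndelay sysfs stores later in the patch keep the two values mutually
 * exclusive. The helper name is hypothetical. */
static inline const char *sdebug_deferral_mode(int delay, int ndelay)
{
	if (delay == 0)
		return "inline: scsi_done() called from queuecommand";
	if (delay > 0)
		return "timer_list: expires after 'delay' jiffies";
	if (ndelay > 0)
		return "hrtimer: expires after 'ndelay' nanoseconds";
	return "tasklet: -1 uses tasklet_hi_schedule(), else tasklet_schedule()";
}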
/* Note: The following macros create attribute files in the
/sys/module/scsi_debug/parameters directory. Unfortunately this
driver is unaware of a change and cannot trigger auxiliary actions
@@ -2773,6 +3142,7 @@ module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);
+module_param_named(host_lock, scsi_debug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
@@ -2780,6 +3150,7 @@ module_param_named(lbprz, scsi_debug_lbprz, int, S_IRUGO);
module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
+module_param_named(ndelay, scsi_debug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
@@ -2809,7 +3180,7 @@ MODULE_VERSION(SCSI_DEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
-MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
+MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
@@ -2817,13 +3188,15 @@ MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
+MODULE_PARM_DESC(host_lock, "use host_lock around all commands (def=0)");
MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lbprz, "unmapped blocks return 0 on read (def=1)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
-MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
+MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
+MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
@@ -2854,9 +3227,7 @@ static const char * scsi_debug_info(struct Scsi_Host * shp)
return sdebug_info;
}
-/* scsi_debug_proc_info
- * Used if the driver currently has no own support for /proc/scsi
- */
+/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int length)
{
char arr[16];
@@ -2871,27 +3242,49 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer, int lengt
return -EINVAL;
scsi_debug_opts = opts;
if (scsi_debug_every_nth != 0)
- scsi_debug_cmnd_count = 0;
+ atomic_set(&sdebug_cmnd_count, 0);
return length;
}
+/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
+ * same for each scsi_debug host (if more than one). Some of the counters
+ * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- seq_printf(m, "scsi_debug adapter driver, version "
- "%s [%s]\n"
- "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
- "every_nth=%d(curr:%d)\n"
- "delay=%d, max_luns=%d, scsi_level=%d\n"
- "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
- "number of aborts=%d, device_reset=%d, bus_resets=%d, "
- "host_resets=%d\ndix_reads=%d dix_writes=%d dif_errors=%d\n",
- SCSI_DEBUG_VERSION, scsi_debug_version_date, scsi_debug_num_tgts,
- scsi_debug_dev_size_mb, scsi_debug_opts, scsi_debug_every_nth,
- scsi_debug_cmnd_count, scsi_debug_delay,
- scsi_debug_max_luns, scsi_debug_scsi_level,
- scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
- sdebug_sectors_per, num_aborts, num_dev_resets, num_bus_resets,
- num_host_resets, dix_reads, dix_writes, dif_errors);
+ int f, l;
+ char b[32];
+
+ if (scsi_debug_every_nth > 0)
+ snprintf(b, sizeof(b), " (curr:%d)",
+ ((SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) ?
+ atomic_read(&sdebug_a_tsf) :
+ atomic_read(&sdebug_cmnd_count)));
+ else
+ b[0] = '\0';
+
+ seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n"
+ "num_tgts=%d, shared (ram) size=%d MB, opts=0x%x, "
+ "every_nth=%d%s\n"
+ "delay=%d, ndelay=%d, max_luns=%d, q_completions=%d\n"
+ "sector_size=%d bytes, cylinders=%d, heads=%d, sectors=%d\n"
+ "command aborts=%d; RESETs: device=%d, target=%d, bus=%d, "
+ "host=%d\ndix_reads=%d dix_writes=%d dif_errors=%d "
+ "usec_in_jiffy=%lu\n",
+ SCSI_DEBUG_VERSION, scsi_debug_version_date,
+ scsi_debug_num_tgts, scsi_debug_dev_size_mb, scsi_debug_opts,
+ scsi_debug_every_nth, b, scsi_debug_delay, scsi_debug_ndelay,
+ scsi_debug_max_luns, atomic_read(&sdebug_completions),
+ scsi_debug_sector_size, sdebug_cylinders_per, sdebug_heads,
+ sdebug_sectors_per, num_aborts, num_dev_resets,
+ num_target_resets, num_bus_resets, num_host_resets,
+ dix_reads, dix_writes, dif_errors, TICK_NSEC / 1000);
+
+ f = find_first_bit(queued_in_use_bm, scsi_debug_max_queue);
+ if (f != scsi_debug_max_queue) {
+ l = find_last_bit(queued_in_use_bm, scsi_debug_max_queue);
+ seq_printf(m, " %s BUSY: first,last bits set: %d,%d\n",
+ "queued_in_use_bm", f, l);
+ }
return 0;
}
@@ -2899,23 +3292,69 @@ static ssize_t delay_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);
}
-
+/* Returns -EBUSY if delay is being changed and commands are queued */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int delay;
- char work[20];
-
- if (1 == sscanf(buf, "%10s", work)) {
- if ((1 == sscanf(work, "%d", &delay)) && (delay >= 0)) {
- scsi_debug_delay = delay;
- return count;
+ int delay, res;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &delay))) {
+ res = count;
+ if (scsi_debug_delay != delay) {
+ unsigned long iflags;
+ int k;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ k = find_first_bit(queued_in_use_bm,
+ scsi_debug_max_queue);
+ if (k != scsi_debug_max_queue)
+ res = -EBUSY; /* have queued commands */
+ else {
+ scsi_debug_delay = delay;
+ scsi_debug_ndelay = 0;
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
}
+ return res;
}
return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
+static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ndelay);
+}
+/* Returns -EBUSY if ndelay is being changed and commands are queued */
+/* If > 0 and accepted then scsi_debug_delay is set to DELAY_OVERRIDDEN */
+static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ unsigned long iflags;
+ int ndelay, res, k;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
+ (ndelay >= 0) && (ndelay < 1000000000)) {
+ res = count;
+ if (scsi_debug_ndelay != ndelay) {
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ k = find_first_bit(queued_in_use_bm,
+ scsi_debug_max_queue);
+ if (k != scsi_debug_max_queue)
+ res = -EBUSY; /* have queued commands */
+ else {
+ scsi_debug_ndelay = ndelay;
+ scsi_debug_delay = ndelay ? DELAY_OVERRIDDEN
+ : DEF_DELAY;
+ }
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ }
+ return res;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(ndelay);
+
static ssize_t opts_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);
@@ -2939,7 +3378,8 @@ static ssize_t opts_store(struct device_driver *ddp, const char *buf,
return -EINVAL;
opts_done:
scsi_debug_opts = opts;
- scsi_debug_cmnd_count = 0;
+ atomic_set(&sdebug_cmnd_count, 0);
+ atomic_set(&sdebug_a_tsf, 0);
return count;
}
static DRIVER_ATTR_RW(opts);
@@ -2988,7 +3428,24 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
- scsi_debug_fake_rw = n;
+ n = (n > 0);
+ scsi_debug_fake_rw = (scsi_debug_fake_rw > 0);
+ if (scsi_debug_fake_rw != n) {
+ if ((0 == n) && (NULL == fake_storep)) {
+ unsigned long sz =
+ (unsigned long)scsi_debug_dev_size_mb *
+ 1048576;
+
+ fake_storep = vmalloc(sz);
+ if (NULL == fake_storep) {
+ pr_err("%s: out of memory, 9\n",
+ __func__);
+ return -ENOMEM;
+ }
+ memset(fake_storep, 0, sz);
+ }
+ scsi_debug_fake_rw = n;
+ }
return count;
}
return -EINVAL;
@@ -3053,7 +3510,7 @@ static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) {
scsi_debug_every_nth = nth;
- scsi_debug_cmnd_count = 0;
+ atomic_set(&sdebug_cmnd_count, 0);
return count;
}
return -EINVAL;
@@ -3082,14 +3539,26 @@ static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);
}
+/* N.B. max_queue can be changed while there are queued commands. In flight
+ * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int n;
+ unsigned long iflags;
+ int n, k;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
(n <= SCSI_DEBUG_CANQUEUE)) {
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ k = find_last_bit(queued_in_use_bm, SCSI_DEBUG_CANQUEUE);
scsi_debug_max_queue = n;
+ if (SCSI_DEBUG_CANQUEUE == k)
+ atomic_set(&retired_max_queue, 0);
+ else if (k >= n)
+ atomic_set(&retired_max_queue, k + 1);
+ else
+ atomic_set(&retired_max_queue, 0);
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
return count;
}
return -EINVAL;
@@ -3234,6 +3703,40 @@ static ssize_t removable_store(struct device_driver *ddp, const char *buf,
}
static DRIVER_ATTR_RW(removable);
+static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", !!scsi_debug_host_lock);
+}
+/* Returns -EBUSY if host_lock is being changed and commands are queued */
+static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n, res;
+
+ if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
+ bool new_host_lock = (n > 0);
+
+ res = count;
+ if (new_host_lock != scsi_debug_host_lock) {
+ unsigned long iflags;
+ int k;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ k = find_first_bit(queued_in_use_bm,
+ scsi_debug_max_queue);
+ if (k != scsi_debug_max_queue)
+ res = -EBUSY; /* have queued commands */
+ else
+ scsi_debug_host_lock = new_host_lock;
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ }
+ return res;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(host_lock);
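/* Editorial sketch (assumption: the code that consumes scsi_debug_host_lock
 * is not shown in these hunks). Presumably the queuecommand entry point is
 * wrapped so the Scsi_Host lock is taken only when host_lock=1, roughly: */
static int
sdebug_queuecommand_locked_sketch(struct Scsi_Host *shost,	/* hypothetical name */
				  struct scsi_cmnd *cmd)
{
	if (scsi_debug_host_lock) {
		unsigned long iflags;
		int rc;

		spin_lock_irqsave(shost->host_lock, iflags);
		rc = scsi_debug_queuecommand(cmd);
		spin_unlock_irqrestore(shost->host_lock, iflags);
		return rc;
	}
	return scsi_debug_queuecommand(cmd);
}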
+
+
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
@@ -3266,6 +3769,8 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_ato.attr,
&driver_attr_map.attr,
&driver_attr_removable.attr,
+ &driver_attr_host_lock.attr,
+ &driver_attr_ndelay.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
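/* Editorial note: ATTRIBUTE_GROUPS(sdebug_drv) expands, roughly, to the
 * usual sysfs plumbing, which the driver registration is then expected to
 * reference (e.g. through a .groups pointer):
 *
 *	static const struct attribute_group sdebug_drv_group = {
 *		.attrs = sdebug_drv_attrs,
 *	};
 *	static const struct attribute_group *sdebug_drv_groups[] = {
 *		&sdebug_drv_group,
 *		NULL,
 *	};
 */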
@@ -3279,6 +3784,17 @@ static int __init scsi_debug_init(void)
int k;
int ret;
+ atomic_set(&sdebug_cmnd_count, 0);
+ atomic_set(&sdebug_completions, 0);
+ atomic_set(&retired_max_queue, 0);
+
+ if (scsi_debug_ndelay >= 1000000000) {
+ pr_warn("%s: ndelay must be less than 1 second, ignored\n",
+ __func__);
+ scsi_debug_ndelay = 0;
+ } else if (scsi_debug_ndelay > 0)
+ scsi_debug_delay = DELAY_OVERRIDDEN;
+
switch (scsi_debug_sector_size) {
case 512:
case 1024:
@@ -3286,7 +3802,7 @@ static int __init scsi_debug_init(void)
case 4096:
break;
default:
- printk(KERN_ERR "scsi_debug_init: invalid sector_size %d\n",
+ pr_err("%s: invalid sector_size %d\n", __func__,
scsi_debug_sector_size);
return -EINVAL;
}
@@ -3300,28 +3816,28 @@ static int __init scsi_debug_init(void)
break;
default:
- printk(KERN_ERR "scsi_debug_init: dif must be 0, 1, 2 or 3\n");
+ pr_err("%s: dif must be 0, 1, 2 or 3\n", __func__);
return -EINVAL;
}
if (scsi_debug_guard > 1) {
- printk(KERN_ERR "scsi_debug_init: guard must be 0 or 1\n");
+ pr_err("%s: guard must be 0 or 1\n", __func__);
return -EINVAL;
}
if (scsi_debug_ato > 1) {
- printk(KERN_ERR "scsi_debug_init: ato must be 0 or 1\n");
+ pr_err("%s: ato must be 0 or 1\n", __func__);
return -EINVAL;
}
if (scsi_debug_physblk_exp > 15) {
- printk(KERN_ERR "scsi_debug_init: invalid physblk_exp %u\n",
+ pr_err("%s: invalid physblk_exp %u\n", __func__,
scsi_debug_physblk_exp);
return -EINVAL;
}
if (scsi_debug_lowest_aligned > 0x3fff) {
- printk(KERN_ERR "scsi_debug_init: lowest_aligned too big: %u\n",
+ pr_err("%s: lowest_aligned too big: %u\n", __func__,
scsi_debug_lowest_aligned);
return -EINVAL;
}
@@ -3349,14 +3865,16 @@ static int __init scsi_debug_init(void)
(sdebug_sectors_per * sdebug_heads);
}
- fake_storep = vmalloc(sz);
- if (NULL == fake_storep) {
- printk(KERN_ERR "scsi_debug_init: out of memory, 1\n");
- return -ENOMEM;
+ if (0 == scsi_debug_fake_rw) {
+ fake_storep = vmalloc(sz);
+ if (NULL == fake_storep) {
+ pr_err("%s: out of memory, 1\n", __func__);
+ return -ENOMEM;
+ }
+ memset(fake_storep, 0, sz);
+ if (scsi_debug_num_parts > 0)
+ sdebug_build_parts(fake_storep, sz);
}
- memset(fake_storep, 0, sz);
- if (scsi_debug_num_parts > 0)
- sdebug_build_parts(fake_storep, sz);
if (scsi_debug_dix) {
int dif_size;
@@ -3364,11 +3882,11 @@ static int __init scsi_debug_init(void)
dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple);
dif_storep = vmalloc(dif_size);
- printk(KERN_ERR "scsi_debug_init: dif_storep %u bytes @ %p\n",
- dif_size, dif_storep);
+ pr_err("%s: dif_storep %u bytes @ %p\n", __func__, dif_size,
+ dif_storep);
if (dif_storep == NULL) {
- printk(KERN_ERR "scsi_debug_init: out of mem. (DIX)\n");
+ pr_err("%s: out of mem. (DIX)\n", __func__);
ret = -ENOMEM;
goto free_vm;
}
@@ -3390,8 +3908,7 @@ static int __init scsi_debug_init(void)
if (scsi_debug_unmap_alignment &&
scsi_debug_unmap_granularity <=
scsi_debug_unmap_alignment) {
- printk(KERN_ERR
- "%s: ERR: unmap_granularity <= unmap_alignment\n",
+ pr_err("%s: ERR: unmap_granularity <= unmap_alignment\n",
__func__);
return -EINVAL;
}
@@ -3399,11 +3916,10 @@ static int __init scsi_debug_init(void)
map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long));
- printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n",
- map_size);
+ pr_info("%s: %lu provisioning blocks\n", __func__, map_size);
if (map_storep == NULL) {
- printk(KERN_ERR "scsi_debug_init: out of mem. (MAP)\n");
+ pr_err("%s: out of mem. (MAP)\n", __func__);
ret = -ENOMEM;
goto free_vm;
}
@@ -3417,39 +3933,35 @@ static int __init scsi_debug_init(void)
pseudo_primary = root_device_register("pseudo_0");
if (IS_ERR(pseudo_primary)) {
- printk(KERN_WARNING "scsi_debug: root_device_register() error\n");
+ pr_warn("%s: root_device_register() error\n", __func__);
ret = PTR_ERR(pseudo_primary);
goto free_vm;
}
ret = bus_register(&pseudo_lld_bus);
if (ret < 0) {
- printk(KERN_WARNING "scsi_debug: bus_register error: %d\n",
- ret);
+ pr_warn("%s: bus_register error: %d\n", __func__, ret);
goto dev_unreg;
}
ret = driver_register(&sdebug_driverfs_driver);
if (ret < 0) {
- printk(KERN_WARNING "scsi_debug: driver_register error: %d\n",
- ret);
+ pr_warn("%s: driver_register error: %d\n", __func__, ret);
goto bus_unreg;
}
- init_all_queued();
-
host_to_add = scsi_debug_add_host;
scsi_debug_add_host = 0;
for (k = 0; k < host_to_add; k++) {
if (sdebug_add_adapter()) {
- printk(KERN_ERR "scsi_debug_init: "
- "sdebug_add_adapter failed k=%d\n", k);
+ pr_err("%s: sdebug_add_adapter failed k=%d\n",
+ __func__, k);
break;
}
}
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) {
- printk(KERN_INFO "scsi_debug_init: built %d host(s)\n",
- scsi_debug_add_host);
+ pr_info("%s: built %d host(s)\n", __func__,
+ scsi_debug_add_host);
}
return 0;
@@ -3472,6 +3984,7 @@ static void __exit scsi_debug_exit(void)
int k = scsi_debug_add_host;
stop_all_queued();
+ free_all_queued();
for (; k; k--)
sdebug_remove_adapter();
driver_unregister(&sdebug_driverfs_driver);
@@ -3569,8 +4082,8 @@ static void sdebug_remove_adapter(void)
--scsi_debug_add_host;
}
-static
-int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
+static int
+scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
{
unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
int len, k;
@@ -3589,32 +4102,34 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
int unmap = 0;
scsi_set_resid(SCpnt, 0);
- if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && cmd) {
- printk(KERN_INFO "scsi_debug: cmd ");
- for (k = 0, len = SCpnt->cmd_len; k < len; ++k)
- printk("%02x ", (int)cmd[k]);
- printk("\n");
- }
-
- if (target == SCpnt->device->host->hostt->this_id) {
- printk(KERN_INFO "scsi_debug: initiator's id used as "
- "target!\n");
- return schedule_resp(SCpnt, NULL, done,
- DID_NO_CONNECT << 16, 0);
+ if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
+ !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts) && cmd) {
+ char b[120];
+ int n;
+
+ len = SCpnt->cmd_len;
+ if (len > 32)
+ strcpy(b, "too long, over 32 bytes");
+ else {
+ for (k = 0, n = 0; k < len; ++k)
+ n += scnprintf(b + n, sizeof(b) - n, "%02x ",
+ (unsigned int)cmd[k]);
+ }
+ sdev_printk(KERN_INFO, SCpnt->device, "%s: cmd %s\n", my_name,
+ b);
}
if ((SCpnt->device->lun >= scsi_debug_max_luns) &&
(SCpnt->device->lun != SAM2_WLUN_REPORT_LUNS))
- return schedule_resp(SCpnt, NULL, done,
- DID_NO_CONNECT << 16, 0);
+ return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
devip = devInfoReg(SCpnt->device);
if (NULL == devip)
- return schedule_resp(SCpnt, NULL, done,
- DID_NO_CONNECT << 16, 0);
+ return schedule_resp(SCpnt, NULL, DID_NO_CONNECT << 16, 0);
if ((scsi_debug_every_nth != 0) &&
- (++scsi_debug_cmnd_count >= abs(scsi_debug_every_nth))) {
- scsi_debug_cmnd_count = 0;
+ (atomic_inc_return(&sdebug_cmnd_count) >=
+ abs(scsi_debug_every_nth))) {
+ atomic_set(&sdebug_cmnd_count, 0);
if (scsi_debug_every_nth < -1)
scsi_debug_every_nth = -1;
if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
@@ -3645,11 +4160,10 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
printk(KERN_INFO "scsi_debug: Opcode: 0x%x "
"not supported for wlun\n", *cmd);
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_OPCODE, 0);
errsts = check_condition_result;
- return schedule_resp(SCpnt, devip, done, errsts,
- 0);
+ return schedule_resp(SCpnt, devip, errsts, 0);
}
}
@@ -3667,7 +4181,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
errsts = resp_start_stop(SCpnt, devip);
break;
case ALLOW_MEDIUM_REMOVAL:
- errsts = check_readiness(SCpnt, 1, devip);
+ errsts = check_readiness(SCpnt, UAS_ONLY, devip);
if (errsts)
break;
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
@@ -3675,23 +4189,23 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
cmd[4] ? "inhibited" : "enabled");
break;
case SEND_DIAGNOSTIC: /* mandatory */
- errsts = check_readiness(SCpnt, 1, devip);
+ errsts = check_readiness(SCpnt, UAS_ONLY, devip);
break;
case TEST_UNIT_READY: /* mandatory */
- delay_override = 1;
- errsts = check_readiness(SCpnt, 0, devip);
+ /* delay_override = 1; */
+ errsts = check_readiness(SCpnt, UAS_TUR, devip);
break;
case RESERVE:
- errsts = check_readiness(SCpnt, 1, devip);
+ errsts = check_readiness(SCpnt, UAS_ONLY, devip);
break;
case RESERVE_10:
- errsts = check_readiness(SCpnt, 1, devip);
+ errsts = check_readiness(SCpnt, UAS_ONLY, devip);
break;
case RELEASE:
- errsts = check_readiness(SCpnt, 1, devip);
+ errsts = check_readiness(SCpnt, UAS_ONLY, devip);
break;
case RELEASE_10:
- errsts = check_readiness(SCpnt, 1, devip);
+ errsts = check_readiness(SCpnt, UAS_ONLY, devip);
break;
case READ_CAPACITY:
errsts = resp_readcap(SCpnt, devip);
@@ -3702,20 +4216,20 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
else if (cmd[1] == SAI_GET_LBA_STATUS) {
if (scsi_debug_lbp() == 0) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
} else
errsts = resp_get_lba_status(SCpnt, devip);
} else {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_OPCODE, 0);
errsts = check_condition_result;
}
break;
case MAINTENANCE_IN:
if (MI_REPORT_TARGET_PGS != cmd[1]) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_OPCODE, 0);
errsts = check_condition_result;
break;
@@ -3728,7 +4242,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
/* READ{10,12,16} and DIF Type 2 are natural enemies */
if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
cmd[1] & 0xe0) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
break;
@@ -3742,7 +4256,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
/* fall through */
case READ_6:
read:
- errsts = check_readiness(SCpnt, 0, devip);
+ errsts = check_readiness(SCpnt, UAS_TUR, devip);
if (errsts)
break;
if (scsi_debug_fake_rw)
@@ -3752,20 +4266,21 @@ read:
if (inj_short)
num /= 2;
- errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
+ errsts = resp_read(SCpnt, lba, num, ei_lba);
if (inj_recovered && (0 == errsts)) {
- mk_sense_buffer(devip, RECOVERED_ERROR,
+ mk_sense_buffer(SCpnt, RECOVERED_ERROR,
THRESHOLD_EXCEEDED, 0);
errsts = check_condition_result;
} else if (inj_transport && (0 == errsts)) {
- mk_sense_buffer(devip, ABORTED_COMMAND,
+ mk_sense_buffer(SCpnt, ABORTED_COMMAND,
TRANSPORT_PROBLEM, ACK_NAK_TO);
errsts = check_condition_result;
} else if (inj_dif && (0 == errsts)) {
- mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
+ /* Logical block guard check failed */
+ mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
errsts = illegal_condition_result;
} else if (inj_dix && (0 == errsts)) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
errsts = illegal_condition_result;
}
break;
@@ -3774,7 +4289,7 @@ read:
errsts = resp_report_luns(SCpnt, devip);
break;
case VERIFY: /* 10 byte SBC-2 command */
- errsts = check_readiness(SCpnt, 0, devip);
+ errsts = check_readiness(SCpnt, UAS_TUR, devip);
break;
case WRITE_16:
case WRITE_12:
@@ -3782,7 +4297,7 @@ read:
/* WRITE{10,12,16} and DIF Type 2 are natural enemies */
if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
cmd[1] & 0xe0) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
break;
@@ -3796,22 +4311,22 @@ read:
/* fall through */
case WRITE_6:
write:
- errsts = check_readiness(SCpnt, 0, devip);
+ errsts = check_readiness(SCpnt, UAS_TUR, devip);
if (errsts)
break;
if (scsi_debug_fake_rw)
break;
get_data_transfer_info(cmd, &lba, &num, &ei_lba);
- errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
+ errsts = resp_write(SCpnt, lba, num, ei_lba);
if (inj_recovered && (0 == errsts)) {
- mk_sense_buffer(devip, RECOVERED_ERROR,
+ mk_sense_buffer(SCpnt, RECOVERED_ERROR,
THRESHOLD_EXCEEDED, 0);
errsts = check_condition_result;
} else if (inj_dif && (0 == errsts)) {
- mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, 1);
+ mk_sense_buffer(SCpnt, ABORTED_COMMAND, 0x10, 1);
errsts = illegal_condition_result;
} else if (inj_dix && (0 == errsts)) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, 1);
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, 0x10, 1);
errsts = illegal_condition_result;
}
break;
@@ -3820,7 +4335,7 @@ write:
if (cmd[1] & 0x8) {
if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
(*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
errsts = check_condition_result;
} else
@@ -3828,19 +4343,23 @@ write:
}
if (errsts)
break;
- errsts = check_readiness(SCpnt, 0, devip);
+ errsts = check_readiness(SCpnt, UAS_TUR, devip);
if (errsts)
break;
+ if (scsi_debug_fake_rw)
+ break;
get_data_transfer_info(cmd, &lba, &num, &ei_lba);
- errsts = resp_write_same(SCpnt, lba, num, devip, ei_lba, unmap);
+ errsts = resp_write_same(SCpnt, lba, num, ei_lba, unmap);
break;
case UNMAP:
- errsts = check_readiness(SCpnt, 0, devip);
+ errsts = check_readiness(SCpnt, UAS_TUR, devip);
if (errsts)
break;
+ if (scsi_debug_fake_rw)
+ break;
if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
} else
@@ -3861,29 +4380,29 @@ write:
break;
case SYNCHRONIZE_CACHE:
delay_override = 1;
- errsts = check_readiness(SCpnt, 0, devip);
+ errsts = check_readiness(SCpnt, UAS_TUR, devip);
break;
case WRITE_BUFFER:
- errsts = check_readiness(SCpnt, 1, devip);
+ errsts = check_readiness(SCpnt, UAS_ONLY, devip);
break;
case XDWRITEREAD_10:
if (!scsi_bidi_cmnd(SCpnt)) {
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
errsts = check_condition_result;
break;
}
- errsts = check_readiness(SCpnt, 0, devip);
+ errsts = check_readiness(SCpnt, UAS_TUR, devip);
if (errsts)
break;
if (scsi_debug_fake_rw)
break;
get_data_transfer_info(cmd, &lba, &num, &ei_lba);
- errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
+ errsts = resp_read(SCpnt, lba, num, ei_lba);
if (errsts)
break;
- errsts = resp_write(SCpnt, lba, num, devip, ei_lba);
+ errsts = resp_write(SCpnt, lba, num, ei_lba);
if (errsts)
break;
errsts = resp_xdwriteread(SCpnt, lba, num, devip);
@@ -3906,27 +4425,138 @@ write:
}
}
- mk_sense_buffer(devip, ILLEGAL_REQUEST,
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
errsts = check_condition_result;
break;
-
+ case 0x85:
+ if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s: ATA PASS-THROUGH(16) not supported\n", my_name);
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST,
+ INVALID_OPCODE, 0);
+ errsts = check_condition_result;
+ break;
default:
if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
- printk(KERN_INFO "scsi_debug: Opcode: 0x%x not "
- "supported\n", *cmd);
- errsts = check_readiness(SCpnt, 1, devip);
+ sdev_printk(KERN_INFO, SCpnt->device,
+ "%s: Opcode: 0x%x not supported\n",
+ my_name, *cmd);
+ errsts = check_readiness(SCpnt, UAS_ONLY, devip);
if (errsts)
break; /* Unit attention takes precedence */
- mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
+ mk_sense_buffer(SCpnt, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
errsts = check_condition_result;
break;
}
- return schedule_resp(SCpnt, devip, done, errsts,
+ return schedule_resp(SCpnt, devip, errsts,
(delay_override ? 0 : scsi_debug_delay));
}
-static DEF_SCSI_QCMD(scsi_debug_queuecommand)
+static int
+sdebug_queuecommand_lock_or_not(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ if (scsi_debug_host_lock) {
+ unsigned long iflags;
+ int rc;
+
+ spin_lock_irqsave(shost->host_lock, iflags);
+ rc = scsi_debug_queuecommand(cmd);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
+ return rc;
+ } else
+ return scsi_debug_queuecommand(cmd);
+}
+
+static int
+sdebug_change_qdepth(struct scsi_device *sdev, int qdepth, int reason)
+{
+ int num_in_q = 0;
+ int bad = 0;
+ unsigned long iflags;
+ struct sdebug_dev_info *devip;
+
+ spin_lock_irqsave(&queued_arr_lock, iflags);
+ devip = (struct sdebug_dev_info *)sdev->hostdata;
+ if (NULL == devip) {
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ return -ENODEV;
+ }
+ num_in_q = atomic_read(&devip->num_in_q);
+ spin_unlock_irqrestore(&queued_arr_lock, iflags);
+ if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) {
+ if (qdepth < 1)
+ qdepth = 1;
+ /* allow exceeding max host queued_arr elements for testing */
+ if (qdepth > SCSI_DEBUG_CANQUEUE + 10)
+ qdepth = SCSI_DEBUG_CANQUEUE + 10;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+ } else if (reason == SCSI_QDEPTH_QFULL)
+ scsi_track_queue_full(sdev, qdepth);
+ else
+ bad = 1;
+ if (bad)
+ sdev_printk(KERN_WARNING, sdev,
+ "%s: unknown reason=0x%x\n", __func__, reason);
+ if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
+ if (SCSI_QDEPTH_QFULL == reason)
+ sdev_printk(KERN_INFO, sdev,
+ "%s: -> %d, num_in_q=%d, reason: queue full\n",
+ __func__, qdepth, num_in_q);
+ else {
+ const char *cp;
+
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ cp = "default (sysfs ?)";
+ break;
+ case SCSI_QDEPTH_RAMP_UP:
+ cp = "ramp up";
+ break;
+ default:
+ cp = "unknown";
+ break;
+ }
+ sdev_printk(KERN_INFO, sdev,
+ "%s: qdepth=%d, num_in_q=%d, reason: %s\n",
+ __func__, qdepth, num_in_q, cp);
+ }
+ }
+ return sdev->queue_depth;
+}
+
+static int
+sdebug_change_qtype(struct scsi_device *sdev, int qtype)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, qtype);
+ if (qtype)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ qtype = 0;
+ if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
+ const char *cp;
+
+ switch (qtype) {
+ case 0:
+ cp = "untagged";
+ break;
+ case MSG_SIMPLE_TAG:
+ cp = "simple tags";
+ break;
+ case MSG_ORDERED_TAG:
+ cp = "ordered tags";
+ break;
+ default:
+ cp = "unknown";
+ break;
+ }
+ sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
+ }
+ return qtype;
+}
static struct scsi_host_template sdebug_driver_template = {
.show_info = scsi_debug_show_info,
@@ -3938,17 +4568,19 @@ static struct scsi_host_template sdebug_driver_template = {
.slave_configure = scsi_debug_slave_configure,
.slave_destroy = scsi_debug_slave_destroy,
.ioctl = scsi_debug_ioctl,
- .queuecommand = scsi_debug_queuecommand,
+ .queuecommand = sdebug_queuecommand_lock_or_not,
+ .change_queue_depth = sdebug_change_qdepth,
+ .change_queue_type = sdebug_change_qtype,
.eh_abort_handler = scsi_debug_abort,
- .eh_bus_reset_handler = scsi_debug_bus_reset,
.eh_device_reset_handler = scsi_debug_device_reset,
+ .eh_target_reset_handler = scsi_debug_target_reset,
+ .eh_bus_reset_handler = scsi_debug_bus_reset,
.eh_host_reset_handler = scsi_debug_host_reset,
- .bios_param = scsi_debug_biosparam,
.can_queue = SCSI_DEBUG_CANQUEUE,
.this_id = 7,
- .sg_tablesize = 256,
- .cmd_per_lun = 16,
- .max_sectors = 0xffff,
+ .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
+ .cmd_per_lun = DEF_CMD_PER_LUN,
+ .max_sectors = -1U,
.use_clustering = DISABLE_CLUSTERING,
.module = THIS_MODULE,
};
@@ -4032,8 +4664,7 @@ static int sdebug_driver_probe(struct device * dev)
} else
scsi_scan_host(hpnt);
-
- return error;
+ return error;
}
static int sdebug_driver_remove(struct device * dev)
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index f969aca0b54e..49014a143c6a 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -222,6 +222,7 @@ static struct {
{"PIONEER", "CD-ROM DRM-602X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-604X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
{"Promise", "", NULL, BLIST_SPARSELUN},
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 7e957918f33f..5db8454474ee 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -59,11 +59,11 @@ static int scsi_try_to_abort_cmd(struct scsi_host_template *,
/* called with shost->host_lock held */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
- if (shost->host_busy == shost->host_failed) {
+ if (atomic_read(&shost->host_busy) == shost->host_failed) {
trace_scsi_eh_wakeup(shost);
wake_up_process(shost->ehandler);
- SCSI_LOG_ERROR_RECOVERY(5,
- printk("Waking error handler thread\n"));
+ SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
+ "Waking error handler thread\n"));
}
}
@@ -193,7 +193,7 @@ scsi_abort_command(struct scsi_cmnd *scmd)
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"scmd %p previous abort failed\n", scmd));
- cancel_delayed_work(&scmd->abort_work);
+ BUG_ON(delayed_work_pending(&scmd->abort_work));
return FAILED;
}
@@ -319,8 +319,8 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
online = scsi_device_online(sdev);
- SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __func__,
- online));
+ SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
+ "%s: rtn: %d\n", __func__, online));
return online;
}
@@ -365,8 +365,9 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
}
}
- SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
- " devices require eh work\n",
+ SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost,
+ "Total of %d commands on %d"
+ " devices require eh work\n",
total_failures, devices_failed));
}
#endif
@@ -738,8 +739,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd)
{
struct completion *eh_action;
- SCSI_LOG_ERROR_RECOVERY(3,
- printk("%s scmd: %p result: %x\n",
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s scmd: %p result: %x\n",
__func__, scmd, scmd->result));
eh_action = scmd->device->host->eh_action;
@@ -758,8 +759,8 @@ static int scsi_try_host_reset(struct scsi_cmnd *scmd)
struct Scsi_Host *host = scmd->device->host;
struct scsi_host_template *hostt = host->hostt;
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
- __func__));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, host, "Snd Host RST\n"));
if (!hostt->eh_host_reset_handler)
return FAILED;
@@ -788,8 +789,8 @@ static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
struct Scsi_Host *host = scmd->device->host;
struct scsi_host_template *hostt = host->hostt;
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
- __func__));
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s: Snd Bus RST\n", __func__));
if (!hostt->eh_bus_reset_handler)
return FAILED;
@@ -1036,8 +1037,8 @@ retry:
scsi_log_completion(scmd, rtn);
- SCSI_LOG_ERROR_RECOVERY(3,
- printk("%s: scmd: %p, timeleft: %ld\n",
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s: scmd: %p, timeleft: %ld\n",
__func__, scmd, timeleft));
/*
@@ -1051,9 +1052,8 @@ retry:
*/
if (timeleft) {
rtn = scsi_eh_completed_normally(scmd);
- SCSI_LOG_ERROR_RECOVERY(3,
- printk("%s: scsi_eh_completed_normally %x\n",
- __func__, rtn));
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s: scsi_eh_completed_normally %x\n", __func__, rtn));
switch (rtn) {
case SUCCESS:
@@ -1177,9 +1177,9 @@ int scsi_eh_get_sense(struct list_head *work_q,
if (rtn != SUCCESS)
continue;
- SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
- " result %x\n", scmd,
- scmd->result));
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "sense requested for %p result %x\n",
+ scmd, scmd->result));
SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd));
rtn = scsi_decide_disposition(scmd);
@@ -1220,8 +1220,8 @@ retry_tur:
rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
scmd->device->eh_timeout, 0);
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
- __func__, scmd, rtn));
+ SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
+ "%s: scmd %p rtn %x\n", __func__, scmd, rtn));
switch (rtn) {
case NEEDS_RETRY:
@@ -1323,16 +1323,16 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
__func__));
return list_empty(work_q);
}
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
- "0x%p\n", current->comm,
- scmd));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: aborting cmd: 0x%p\n",
+ current->comm, scmd));
rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
if (rtn == FAILED) {
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
- " cmd failed:"
- "0x%p\n",
- current->comm,
- scmd));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: aborting cmd failed: 0x%p\n",
+ current->comm, scmd));
list_splice_init(&check_list, work_q);
return list_empty(work_q);
}
@@ -1406,8 +1406,10 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
if (!stu_scmd)
continue;
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
- " 0x%p\n", current->comm, sdev));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Sending START_UNIT to sdev: 0x%p\n",
+ current->comm, sdev));
if (!scsi_eh_try_stu(stu_scmd)) {
if (!scsi_device_online(sdev) ||
@@ -1421,8 +1423,9 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
}
} else {
SCSI_LOG_ERROR_RECOVERY(3,
- printk("%s: START_UNIT failed to sdev:"
- " 0x%p\n", current->comm, sdev));
+ shost_printk(KERN_INFO, shost,
+ "%s: START_UNIT failed to sdev:"
+ " 0x%p\n", current->comm, sdev));
}
}
@@ -1468,9 +1471,10 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
if (!bdr_scmd)
continue;
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
- " 0x%p\n", current->comm,
- sdev));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Sending BDR sdev: 0x%p\n",
+ current->comm, sdev));
rtn = scsi_try_bus_device_reset(bdr_scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
if (!scsi_device_online(sdev) ||
@@ -1485,11 +1489,10 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
}
}
} else {
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
- " failed sdev:"
- "0x%p\n",
- current->comm,
- sdev));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: BDR failed sdev: 0x%p\n",
+ current->comm, sdev));
}
}
@@ -1533,15 +1536,17 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
id = scmd_id(scmd);
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending target reset "
- "to target %d\n",
- current->comm, id));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Sending target reset to target %d\n",
+ current->comm, id));
rtn = scsi_try_target_reset(scmd);
if (rtn != SUCCESS && rtn != FAST_IO_FAIL)
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Target reset"
- " failed target: "
- "%d\n",
- current->comm, id));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Target reset failed"
+ " target: %d\n",
+ current->comm, id));
list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
if (scmd_id(scmd) != id)
continue;
@@ -1605,9 +1610,10 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
if (!chan_scmd)
continue;
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
- " %d\n", current->comm,
- channel));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Sending BRST chan: %d\n",
+ current->comm, channel));
rtn = scsi_try_bus_reset(chan_scmd);
if (rtn == SUCCESS || rtn == FAST_IO_FAIL) {
list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
@@ -1621,10 +1627,10 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
}
}
} else {
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
- " failed chan: %d\n",
- current->comm,
- channel));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: BRST failed chan: %d\n",
+ current->comm, channel));
}
}
return scsi_eh_test_devices(&check_list, work_q, done_q, 0);
@@ -1635,7 +1641,8 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
* @work_q: list_head for processed commands.
* @done_q: list_head for processed commands.
*/
-static int scsi_eh_host_reset(struct list_head *work_q,
+static int scsi_eh_host_reset(struct Scsi_Host *shost,
+ struct list_head *work_q,
struct list_head *done_q)
{
struct scsi_cmnd *scmd, *next;
@@ -1646,8 +1653,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
scmd = list_entry(work_q->next,
struct scsi_cmnd, eh_entry);
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
- , current->comm));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: Sending HRST\n",
+ current->comm));
rtn = scsi_try_host_reset(scmd);
if (rtn == SUCCESS) {
@@ -1657,9 +1666,10 @@ static int scsi_eh_host_reset(struct list_head *work_q,
scsi_eh_finish_cmd(scmd, done_q);
}
} else {
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
- " failed\n",
- current->comm));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ shost_printk(KERN_INFO, shost,
+ "%s: HRST failed\n",
+ current->comm));
}
}
return scsi_eh_test_devices(&check_list, work_q, done_q, 1);
@@ -1751,9 +1761,8 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* up to the top level.
*/
if (!scsi_device_online(scmd->device)) {
- SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
- " as SUCCESS\n",
- __func__));
+ SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd,
+ "%s: device offline - report as SUCCESS\n", __func__));
return SUCCESS;
}
@@ -1999,8 +2008,7 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* ioctls to queued block devices.
*/
SCSI_LOG_ERROR_RECOVERY(3,
- printk("scsi_eh_%d waking up host to restart\n",
- shost->host_no));
+ shost_printk(KERN_INFO, shost, "waking up host to restart\n"));
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_RUNNING))
@@ -2047,7 +2055,7 @@ void scsi_eh_ready_devs(struct Scsi_Host *shost,
if (!scsi_eh_bus_device_reset(shost, work_q, done_q))
if (!scsi_eh_target_reset(shost, work_q, done_q))
if (!scsi_eh_bus_reset(shost, work_q, done_q))
- if (!scsi_eh_host_reset(work_q, done_q))
+ if (!scsi_eh_host_reset(shost, work_q, done_q))
scsi_eh_offline_sdevs(work_q,
done_q);
}
@@ -2066,10 +2074,10 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
if (scsi_device_online(scmd->device) &&
!scsi_noretry_cmd(scmd) &&
(++scmd->retries <= scmd->allowed)) {
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
- " retry cmd: %p\n",
- current->comm,
- scmd));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: flush retry cmd: %p\n",
+ current->comm, scmd));
scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
} else {
/*
@@ -2079,9 +2087,10 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
*/
if (!scmd->result)
scmd->result |= (DRIVER_TIMEOUT << 24);
- SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
- " cmd: %p\n",
- current->comm, scmd));
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "%s: flush finish cmd: %p\n",
+ current->comm, scmd));
scsi_finish_command(scmd);
}
}
@@ -2155,19 +2164,22 @@ int scsi_error_handler(void *data)
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
- shost->host_failed != shost->host_busy) {
+ shost->host_failed != atomic_read(&shost->host_busy)) {
SCSI_LOG_ERROR_RECOVERY(1,
- printk("scsi_eh_%d: sleeping\n",
- shost->host_no));
+ shost_printk(KERN_INFO, shost,
+ "scsi_eh_%d: sleeping\n",
+ shost->host_no));
schedule();
continue;
}
__set_current_state(TASK_RUNNING);
SCSI_LOG_ERROR_RECOVERY(1,
- printk("scsi_eh_%d: waking up %d/%d/%d\n",
- shost->host_no, shost->host_eh_scheduled,
- shost->host_failed, shost->host_busy));
+ shost_printk(KERN_INFO, shost,
+ "scsi_eh_%d: waking up %d/%d/%d\n",
+ shost->host_no, shost->host_eh_scheduled,
+ shost->host_failed,
+ atomic_read(&shost->host_busy)));
/*
* We have a host that is failing for some reason. Figure out
@@ -2201,7 +2213,9 @@ int scsi_error_handler(void *data)
__set_current_state(TASK_RUNNING);
SCSI_LOG_ERROR_RECOVERY(1,
- printk("Error handler scsi_eh_%d exiting\n", shost->host_no));
+ shost_printk(KERN_INFO, shost,
+ "Error handler scsi_eh_%d exiting\n",
+ shost->host_no));
shost->ehandler = NULL;
return 0;
}
@@ -2362,8 +2376,8 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
* suspended while we performed the TMF.
*/
SCSI_LOG_ERROR_RECOVERY(3,
- printk("%s: waking up host to restart after TMF\n",
- __func__));
+ shost_printk(KERN_INFO, shost,
+ "waking up host to restart after TMF\n"));
wake_up(&shost->host_wait);
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index d9564fb04f62..1aaaf43c6803 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -91,12 +91,14 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
int result;
struct scsi_sense_hdr sshdr;
- SCSI_LOG_IOCTL(1, printk("Trying ioctl with scsi command %d\n", *cmd));
+ SCSI_LOG_IOCTL(1, sdev_printk(KERN_INFO, sdev,
+ "Trying ioctl with scsi command %d\n", *cmd));
result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
&sshdr, timeout, retries, NULL);
- SCSI_LOG_IOCTL(2, printk("Ioctl returned 0x%x\n", result));
+ SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
+ "Ioctl returned 0x%x\n", result));
if ((driver_byte(result) & DRIVER_SENSE) &&
(scsi_sense_valid(&sshdr))) {
@@ -105,9 +107,11 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
if (cmd[0] == ALLOW_MEDIUM_REMOVAL)
sdev->lockable = 0;
else
- printk(KERN_INFO "ioctl_internal_command: "
- "ILLEGAL REQUEST asc=0x%x ascq=0x%x\n",
- sshdr.asc, sshdr.ascq);
+ sdev_printk(KERN_INFO, sdev,
+ "ioctl_internal_command: "
+ "ILLEGAL REQUEST "
+ "asc=0x%x ascq=0x%x\n",
+ sshdr.asc, sshdr.ascq);
break;
case NOT_READY: /* This happens if there is no disc in drive */
if (sdev->removable)
@@ -127,7 +131,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
}
}
- SCSI_LOG_IOCTL(2, printk("IOCTL Releasing command\n"));
+ SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
+ "IOCTL Releasing command\n"));
return result;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3f50dfcb3227..9c44392b748f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1,5 +1,6 @@
/*
- * scsi_lib.c Copyright (C) 1999 Eric Youngdale
+ * Copyright (C) 1999 Eric Youngdale
+ * Copyright (C) 2014 Christoph Hellwig
*
* SCSI queueing library.
* Initial versions: Eric Youngdale (eric@andante.org).
@@ -20,6 +21,7 @@
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
+#include <linux/blk-mq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -29,6 +31,8 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
+#include <trace/events/scsi.h>
+
#include "scsi_priv.h"
#include "scsi_logging.h"
@@ -75,28 +79,12 @@ struct kmem_cache *scsi_sdb_cache;
*/
#define SCSI_QUEUE_DELAY 3
-/**
- * __scsi_queue_insert - private queue insertion
- * @cmd: The SCSI command being requeued
- * @reason: The reason for the requeue
- * @unbusy: Whether the queue should be unbusied
- *
- * This is a private queue insertion. The public interface
- * scsi_queue_insert() always assumes the queue should be unbusied
- * because it's always called before the completion. This function is
- * for a requeue after completion, which should only occur in this
- * file.
- */
-static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+static void
+scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
struct scsi_target *starget = scsi_target(device);
- struct request_queue *q = device->request_queue;
- unsigned long flags;
-
- SCSI_LOG_MLQUEUE(1,
- printk("Inserting command %p into mlqueue\n", cmd));
/*
* Set the appropriate busy bit for the device/host.
@@ -113,16 +101,52 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
*/
switch (reason) {
case SCSI_MLQUEUE_HOST_BUSY:
- host->host_blocked = host->max_host_blocked;
+ atomic_set(&host->host_blocked, host->max_host_blocked);
break;
case SCSI_MLQUEUE_DEVICE_BUSY:
case SCSI_MLQUEUE_EH_RETRY:
- device->device_blocked = device->max_device_blocked;
+ atomic_set(&device->device_blocked,
+ device->max_device_blocked);
break;
case SCSI_MLQUEUE_TARGET_BUSY:
- starget->target_blocked = starget->max_target_blocked;
+ atomic_set(&starget->target_blocked,
+ starget->max_target_blocked);
break;
}
+}
+
+static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct request_queue *q = cmd->request->q;
+
+ blk_mq_requeue_request(cmd->request);
+ blk_mq_kick_requeue_list(q);
+ put_device(&sdev->sdev_gendev);
+}
+
+/**
+ * __scsi_queue_insert - private queue insertion
+ * @cmd: The SCSI command being requeued
+ * @reason: The reason for the requeue
+ * @unbusy: Whether the queue should be unbusied
+ *
+ * This is a private queue insertion. The public interface
+ * scsi_queue_insert() always assumes the queue should be unbusied
+ * because it's always called before the completion. This function is
+ * for a requeue after completion, which should only occur in this
+ * file.
+ */
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+{
+ struct scsi_device *device = cmd->device;
+ struct request_queue *q = device->request_queue;
+ unsigned long flags;
+
+ SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
+ "Inserting command %p into mlqueue\n", cmd));
+
+ scsi_set_blocked(cmd, reason);
/*
* Decrement the counters, since these commands are no longer
@@ -138,6 +162,10 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
* before blk_cleanup_queue() finishes.
*/
cmd->result = 0;
+ if (q->mq_ops) {
+ scsi_mq_requeue_cmd(cmd);
+ return;
+ }
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
kblockd_schedule_work(&device->requeue_work);
@@ -282,16 +310,26 @@ void scsi_device_unbusy(struct scsi_device *sdev)
struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
- spin_lock_irqsave(shost->host_lock, flags);
- shost->host_busy--;
- starget->target_busy--;
+ atomic_dec(&shost->host_busy);
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
+
if (unlikely(scsi_host_in_recovery(shost) &&
- (shost->host_failed || shost->host_eh_scheduled)))
+ (shost->host_failed || shost->host_eh_scheduled))) {
+ spin_lock_irqsave(shost->host_lock, flags);
scsi_eh_wakeup(shost);
- spin_unlock(shost->host_lock);
- spin_lock(sdev->request_queue->queue_lock);
- sdev->device_busy--;
- spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+
+ atomic_dec(&sdev->device_busy);
+}
+
+static void scsi_kick_queue(struct request_queue *q)
+{
+ if (q->mq_ops)
+ blk_mq_start_hw_queues(q);
+ else
+ blk_run_queue(q);
}
/*
@@ -318,7 +356,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
* but in most cases, we will be first. Ideally, each LU on the
* target would get some limited time or requests on the target.
*/
- blk_run_queue(current_sdev->request_queue);
+ scsi_kick_queue(current_sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags);
if (starget->starget_sdev_user)
@@ -331,7 +369,7 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
- blk_run_queue(sdev->request_queue);
+ scsi_kick_queue(sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags);
scsi_device_put(sdev);
@@ -340,28 +378,36 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
spin_unlock_irqrestore(shost->host_lock, flags);
}
-static inline int scsi_device_is_busy(struct scsi_device *sdev)
+static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
- if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
- return 1;
-
- return 0;
+ if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
+ return true;
+ if (atomic_read(&sdev->device_blocked) > 0)
+ return true;
+ return false;
}
-static inline int scsi_target_is_busy(struct scsi_target *starget)
+static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
- return ((starget->can_queue > 0 &&
- starget->target_busy >= starget->can_queue) ||
- starget->target_blocked);
+ if (starget->can_queue > 0) {
+ if (atomic_read(&starget->target_busy) >= starget->can_queue)
+ return true;
+ if (atomic_read(&starget->target_blocked) > 0)
+ return true;
+ }
+ return false;
}
-static inline int scsi_host_is_busy(struct Scsi_Host *shost)
+static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
- if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
- shost->host_blocked || shost->host_self_blocked)
- return 1;
-
- return 0;
+ if (shost->can_queue > 0 &&
+ atomic_read(&shost->host_busy) >= shost->can_queue)
+ return true;
+ if (atomic_read(&shost->host_blocked) > 0)
+ return true;
+ if (shost->host_self_blocked)
+ return true;
+ return false;
}
static void scsi_starved_list_run(struct Scsi_Host *shost)
@@ -413,7 +459,7 @@ static void scsi_starved_list_run(struct Scsi_Host *shost)
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
- blk_run_queue(slq);
+ scsi_kick_queue(slq);
blk_put_queue(slq);
spin_lock_irqsave(shost->host_lock, flags);
@@ -444,7 +490,10 @@ static void scsi_run_queue(struct request_queue *q)
if (!list_empty(&sdev->host->starved_list))
scsi_starved_list_run(sdev->host);
- blk_run_queue(q);
+ if (q->mq_ops)
+ blk_mq_start_stopped_hw_queues(q, false);
+ else
+ blk_run_queue(q);
}
void scsi_requeue_run_queue(struct work_struct *work)
@@ -542,25 +591,70 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
return mempool_alloc(sgp->pool, gfp_mask);
}
+static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
+{
+ if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
+ return;
+ __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
+}
+
static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, bool mq)
{
+ struct scatterlist *first_chunk = NULL;
int ret;
BUG_ON(!nents);
+ if (mq) {
+ if (nents <= SCSI_MAX_SG_SEGMENTS) {
+ sdb->table.nents = nents;
+ sg_init_table(sdb->table.sgl, sdb->table.nents);
+ return 0;
+ }
+ first_chunk = sdb->table.sgl;
+ }
+
ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
- gfp_mask, scsi_sg_alloc);
+ first_chunk, gfp_mask, scsi_sg_alloc);
if (unlikely(ret))
- __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
- scsi_sg_free);
-
+ scsi_free_sgtable(sdb, mq);
return ret;
}
-static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
+static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
+{
+ if (cmd->request->cmd_type == REQ_TYPE_FS) {
+ struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
+
+ if (drv->uninit_command)
+ drv->uninit_command(cmd);
+ }
+}
+
+static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
- __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
+ if (cmd->sdb.table.nents)
+ scsi_free_sgtable(&cmd->sdb, true);
+ if (cmd->request->next_rq && cmd->request->next_rq->special)
+ scsi_free_sgtable(cmd->request->next_rq->special, true);
+ if (scsi_prot_sg_count(cmd))
+ scsi_free_sgtable(cmd->prot_sdb, true);
+}
+
+static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ unsigned long flags;
+
+ BUG_ON(list_empty(&cmd->list));
+
+ scsi_mq_free_sgtables(cmd);
+ scsi_uninit_cmd(cmd);
+
+ spin_lock_irqsave(&sdev->list_lock, flags);
+ list_del_init(&cmd->list);
+ spin_unlock_irqrestore(&sdev->list_lock, flags);
}
/*
@@ -579,27 +673,79 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
* the __init_io() function. Primarily this would involve
* the scatter-gather table.
*/
-void scsi_release_buffers(struct scsi_cmnd *cmd)
+static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
- scsi_free_sgtable(&cmd->sdb);
+ scsi_free_sgtable(&cmd->sdb, false);
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
if (scsi_prot_sg_count(cmd))
- scsi_free_sgtable(cmd->prot_sdb);
+ scsi_free_sgtable(cmd->prot_sdb, false);
}
-EXPORT_SYMBOL(scsi_release_buffers);
static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
- scsi_free_sgtable(bidi_sdb);
+ scsi_free_sgtable(bidi_sdb, false);
kmem_cache_free(scsi_sdb_cache, bidi_sdb);
cmd->request->next_rq->special = NULL;
}
+static bool scsi_end_request(struct request *req, int error,
+ unsigned int bytes, unsigned int bidi_bytes)
+{
+ struct scsi_cmnd *cmd = req->special;
+ struct scsi_device *sdev = cmd->device;
+ struct request_queue *q = sdev->request_queue;
+
+ if (blk_update_request(req, error, bytes))
+ return true;
+
+ /* Bidi request must be completed as a whole */
+ if (unlikely(bidi_bytes) &&
+ blk_update_request(req->next_rq, error, bidi_bytes))
+ return true;
+
+ if (blk_queue_add_random(q))
+ add_disk_randomness(req->rq_disk);
+
+ if (req->mq_ctx) {
+ /*
+ * In the MQ case the command gets freed by __blk_mq_end_io,
+ * so we have to do all cleanup that depends on it earlier.
+ *
+ * We also can't kick the queues from irq context, so we
+ * will have to defer it to a workqueue.
+ */
+ scsi_mq_uninit_cmd(cmd);
+
+ __blk_mq_end_io(req, error);
+
+ if (scsi_target(sdev)->single_lun ||
+ !list_empty(&sdev->host->starved_list))
+ kblockd_schedule_work(&sdev->requeue_work);
+ else
+ blk_mq_start_stopped_hw_queues(q, true);
+
+ put_device(&sdev->sdev_gendev);
+ } else {
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_finish_request(req, error);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (bidi_bytes)
+ scsi_release_bidi_buffers(cmd);
+ scsi_release_buffers(cmd);
+ scsi_next_command(cmd);
+ }
+
+ return false;
+}
+
/**
* __scsi_error_from_host_byte - translate SCSI error code into errno
* @cmd: SCSI command (unused)
@@ -672,7 +818,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
* be put back on the queue and retried using the same
* command as before, possibly after a delay.
*
- * c) We can call blk_end_request() with -EIO to fail
+ * c) We can call scsi_end_request() with -EIO to fail
* the remainder of the request.
*/
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
@@ -686,7 +832,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
int sense_deferred = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
- char *description = NULL;
unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
if (result) {
@@ -724,13 +869,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* both sides at once.
*/
req->next_rq->resid_len = scsi_in(cmd)->resid;
-
- scsi_release_buffers(cmd);
- scsi_release_bidi_buffers(cmd);
-
- blk_end_request_all(req, 0);
-
- scsi_next_command(cmd);
+ if (scsi_end_request(req, 0, blk_rq_bytes(req),
+ blk_rq_bytes(req->next_rq)))
+ BUG();
return;
}
} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
@@ -750,9 +891,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* Next deal with any sectors which we were able to correctly
* handle.
*/
- SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
- "%d bytes done.\n",
- blk_rq_sectors(req), good_bytes));
+ SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
+ "%u sectors total, %d bytes done.\n",
+ blk_rq_sectors(req), good_bytes));
/*
* Recovered errors need reporting, but they're always treated
@@ -777,15 +918,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
/*
* If we finished all bytes in the request we are done now.
*/
- if (!blk_end_request(req, error, good_bytes))
- goto next_command;
+ if (!scsi_end_request(req, error, good_bytes, 0))
+ return;
/*
* Kill remainder if no retrys.
*/
if (error && scsi_noretry_cmd(cmd)) {
- blk_end_request_all(req, error);
- goto next_command;
+ if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
+ BUG();
+ return;
}
/*
@@ -811,7 +953,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* and quietly refuse further access.
*/
cmd->device->changed = 1;
- description = "Media Changed";
action = ACTION_FAIL;
} else {
/* Must have been a power glitch, or a
@@ -839,27 +980,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
cmd->device->use_10_for_rw = 0;
action = ACTION_REPREP;
} else if (sshdr.asc == 0x10) /* DIX */ {
- description = "Host Data Integrity Failure";
action = ACTION_FAIL;
error = -EILSEQ;
/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
- switch (cmd->cmnd[0]) {
- case UNMAP:
- description = "Discard failure";
- break;
- case WRITE_SAME:
- case WRITE_SAME_16:
- if (cmd->cmnd[1] & 0x8)
- description = "Discard failure";
- else
- description =
- "Write same failure";
- break;
- default:
- description = "Invalid command failure";
- break;
- }
action = ACTION_FAIL;
error = -EREMOTEIO;
} else
@@ -867,10 +991,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
break;
case ABORTED_COMMAND:
action = ACTION_FAIL;
- if (sshdr.asc == 0x10) { /* DIF */
- description = "Target Data Integrity Failure";
+ if (sshdr.asc == 0x10) /* DIF */
error = -EILSEQ;
- }
break;
case NOT_READY:
/* If the device is in the process of becoming
@@ -889,57 +1011,52 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
action = ACTION_DELAYED_RETRY;
break;
default:
- description = "Device not ready";
action = ACTION_FAIL;
break;
}
- } else {
- description = "Device not ready";
+ } else
action = ACTION_FAIL;
- }
break;
case VOLUME_OVERFLOW:
/* See SSC3rXX or current. */
action = ACTION_FAIL;
break;
default:
- description = "Unhandled sense code";
action = ACTION_FAIL;
break;
}
- } else {
- description = "Unhandled error code";
+ } else
action = ACTION_FAIL;
- }
if (action != ACTION_FAIL &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
action = ACTION_FAIL;
- description = "Command timed out";
- }
switch (action) {
case ACTION_FAIL:
/* Give up and fail the remainder of the request */
if (!(req->cmd_flags & REQ_QUIET)) {
- if (description)
- scmd_printk(KERN_INFO, cmd, "%s\n",
- description);
scsi_print_result(cmd);
if (driver_byte(result) & DRIVER_SENSE)
scsi_print_sense("", cmd);
scsi_print_command(cmd);
}
- if (!blk_end_request_err(req, error))
- goto next_command;
+ if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
+ return;
/*FALLTHRU*/
case ACTION_REPREP:
requeue:
/* Unprep the request and put it back at the head of the queue.
* A new command will be prepared and issued.
*/
- scsi_release_buffers(cmd);
- scsi_requeue_command(q, cmd);
+ if (q->mq_ops) {
+ cmd->request->cmd_flags &= ~REQ_DONTPREP;
+ scsi_mq_uninit_cmd(cmd);
+ scsi_mq_requeue_cmd(cmd);
+ } else {
+ scsi_release_buffers(cmd);
+ scsi_requeue_command(q, cmd);
+ }
break;
case ACTION_RETRY:
/* Retry the same command immediately */
@@ -950,11 +1067,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
break;
}
- return;
-
-next_command:
- scsi_release_buffers(cmd);
- scsi_next_command(cmd);
}
static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
@@ -966,9 +1078,8 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
* If sg table allocation fails, requeue request later.
*/
if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
- gfp_mask))) {
+ gfp_mask, req->mq_ctx != NULL)))
return BLKPREP_DEFER;
- }
/*
* Next, walk the list, and fill in the addresses and sizes of
@@ -996,21 +1107,29 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
struct scsi_device *sdev = cmd->device;
struct request *rq = cmd->request;
+ bool is_mq = (rq->mq_ctx != NULL);
+ int error;
+
+ BUG_ON(!rq->nr_phys_segments);
- int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
+ error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
if (error)
goto err_exit;
if (blk_bidi_rq(rq)) {
- struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
- scsi_sdb_cache, GFP_ATOMIC);
- if (!bidi_sdb) {
- error = BLKPREP_DEFER;
- goto err_exit;
+ if (!rq->q->mq_ops) {
+ struct scsi_data_buffer *bidi_sdb =
+ kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
+ if (!bidi_sdb) {
+ error = BLKPREP_DEFER;
+ goto err_exit;
+ }
+
+ rq->next_rq->special = bidi_sdb;
}
- rq->next_rq->special = bidi_sdb;
- error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
+ error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special,
+ GFP_ATOMIC);
if (error)
goto err_exit;
}
@@ -1022,7 +1141,7 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
BUG_ON(prot_sdb == NULL);
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
- if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
+ if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask, is_mq)) {
error = BLKPREP_DEFER;
goto err_exit;
}
@@ -1036,13 +1155,16 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
cmd->prot_sdb->table.nents = count;
}
- return BLKPREP_OK ;
-
+ return BLKPREP_OK;
err_exit:
- scsi_release_buffers(cmd);
- cmd->request->special = NULL;
- scsi_put_command(cmd);
- put_device(&sdev->sdev_gendev);
+ if (is_mq) {
+ scsi_mq_free_sgtables(cmd);
+ } else {
+ scsi_release_buffers(cmd);
+ cmd->request->special = NULL;
+ scsi_put_command(cmd);
+ put_device(&sdev->sdev_gendev);
+ }
return error;
}
EXPORT_SYMBOL(scsi_init_io);
@@ -1077,7 +1199,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
return cmd;
}
-int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
+static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
struct scsi_cmnd *cmd = req->special;
@@ -1088,11 +1210,7 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
* submit a request without an attached bio.
*/
if (req->bio) {
- int ret;
-
- BUG_ON(!req->nr_phys_segments);
-
- ret = scsi_init_io(cmd, GFP_ATOMIC);
+ int ret = scsi_init_io(cmd, GFP_ATOMIC);
if (unlikely(ret))
return ret;
} else {
@@ -1102,25 +1220,16 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
}
cmd->cmd_len = req->cmd_len;
- if (!blk_rq_bytes(req))
- cmd->sc_data_direction = DMA_NONE;
- else if (rq_data_dir(req) == WRITE)
- cmd->sc_data_direction = DMA_TO_DEVICE;
- else
- cmd->sc_data_direction = DMA_FROM_DEVICE;
-
cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = req->retries;
return BLKPREP_OK;
}
-EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
/*
- * Setup a REQ_TYPE_FS command. These are simple read/write request
- * from filesystems that still need to be translated to SCSI CDBs from
- * the ULD.
+ * Setup a REQ_TYPE_FS command. These are simple request from filesystems
+ * that still need to be translated to SCSI CDBs from the ULD.
*/
-int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
+static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
struct scsi_cmnd *cmd = req->special;
@@ -1131,15 +1240,30 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
return ret;
}
- /*
- * Filesystem requests must transfer data.
- */
- BUG_ON(!req->nr_phys_segments);
-
memset(cmd->cmnd, 0, BLK_MAX_CDB);
- return scsi_init_io(cmd, GFP_ATOMIC);
+ return scsi_cmd_to_driver(cmd)->init_command(cmd);
+}
+
+static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
+{
+ struct scsi_cmnd *cmd = req->special;
+
+ if (!blk_rq_bytes(req))
+ cmd->sc_data_direction = DMA_NONE;
+ else if (rq_data_dir(req) == WRITE)
+ cmd->sc_data_direction = DMA_TO_DEVICE;
+ else
+ cmd->sc_data_direction = DMA_FROM_DEVICE;
+
+ switch (req->cmd_type) {
+ case REQ_TYPE_FS:
+ return scsi_setup_fs_cmnd(sdev, req);
+ case REQ_TYPE_BLOCK_PC:
+ return scsi_setup_blk_pc_cmnd(sdev, req);
+ default:
+ return BLKPREP_KILL;
+ }
}
-EXPORT_SYMBOL(scsi_setup_fs_cmnd);
static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
@@ -1218,7 +1342,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
* queue must be restarted, so we schedule a callback to happen
* shortly.
*/
- if (sdev->device_busy == 0)
+ if (atomic_read(&sdev->device_busy) == 0)
blk_delay_queue(q, SCSI_QUEUE_DELAY);
break;
default:
@@ -1244,26 +1368,14 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
goto out;
}
- if (req->cmd_type == REQ_TYPE_FS)
- ret = scsi_cmd_to_driver(cmd)->init_command(cmd);
- else if (req->cmd_type == REQ_TYPE_BLOCK_PC)
- ret = scsi_setup_blk_pc_cmnd(sdev, req);
- else
- ret = BLKPREP_KILL;
-
+ ret = scsi_setup_cmnd(sdev, req);
out:
return scsi_prep_return(q, req, ret);
}
static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
- if (req->cmd_type == REQ_TYPE_FS) {
- struct scsi_cmnd *cmd = req->special;
- struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
-
- if (drv->uninit_command)
- drv->uninit_command(cmd);
- }
+ scsi_uninit_cmd(req->special);
}
/*
@@ -1275,99 +1387,144 @@ static void scsi_unprep_fn(struct request_queue *q, struct request *req)
static inline int scsi_dev_queue_ready(struct request_queue *q,
struct scsi_device *sdev)
{
- if (sdev->device_busy == 0 && sdev->device_blocked) {
+ unsigned int busy;
+
+ busy = atomic_inc_return(&sdev->device_busy) - 1;
+ if (atomic_read(&sdev->device_blocked)) {
+ if (busy)
+ goto out_dec;
+
/*
* unblock after device_blocked iterates to zero
*/
- if (--sdev->device_blocked == 0) {
- SCSI_LOG_MLQUEUE(3,
- sdev_printk(KERN_INFO, sdev,
- "unblocking device at zero depth\n"));
- } else {
- blk_delay_queue(q, SCSI_QUEUE_DELAY);
- return 0;
+ if (atomic_dec_return(&sdev->device_blocked) > 0) {
+ /*
+ * For the MQ case we take care of this in the caller.
+ */
+ if (!q->mq_ops)
+ blk_delay_queue(q, SCSI_QUEUE_DELAY);
+ goto out_dec;
}
+ SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
+ "unblocking device at zero depth\n"));
}
- if (scsi_device_is_busy(sdev))
- return 0;
+
+ if (busy >= sdev->queue_depth)
+ goto out_dec;
return 1;
+out_dec:
+ atomic_dec(&sdev->device_busy);
+ return 0;
}
-
/*
* scsi_target_queue_ready: checks if there we can send commands to target
* @sdev: scsi device on starget to check.
- *
- * Called with the host lock held.
*/
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
struct scsi_device *sdev)
{
struct scsi_target *starget = scsi_target(sdev);
+ unsigned int busy;
if (starget->single_lun) {
+ spin_lock_irq(shost->host_lock);
if (starget->starget_sdev_user &&
- starget->starget_sdev_user != sdev)
+ starget->starget_sdev_user != sdev) {
+ spin_unlock_irq(shost->host_lock);
return 0;
+ }
starget->starget_sdev_user = sdev;
+ spin_unlock_irq(shost->host_lock);
}
- if (starget->target_busy == 0 && starget->target_blocked) {
+ if (starget->can_queue <= 0)
+ return 1;
+
+ busy = atomic_inc_return(&starget->target_busy) - 1;
+ if (atomic_read(&starget->target_blocked) > 0) {
+ if (busy)
+ goto starved;
+
/*
* unblock after target_blocked iterates to zero
*/
- if (--starget->target_blocked == 0) {
- SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
- "unblocking target at zero depth\n"));
- } else
- return 0;
- }
+ if (atomic_dec_return(&starget->target_blocked) > 0)
+ goto out_dec;
- if (scsi_target_is_busy(starget)) {
- list_move_tail(&sdev->starved_entry, &shost->starved_list);
- return 0;
+ SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+ "unblocking target at zero depth\n"));
}
+ if (busy >= starget->can_queue)
+ goto starved;
+
return 1;
+
+starved:
+ spin_lock_irq(shost->host_lock);
+ list_move_tail(&sdev->starved_entry, &shost->starved_list);
+ spin_unlock_irq(shost->host_lock);
+out_dec:
+ if (starget->can_queue > 0)
+ atomic_dec(&starget->target_busy);
+ return 0;
}
/*
* scsi_host_queue_ready: if we can send requests to shost, return 1 else
* return 0. We must end up running the queue again whenever 0 is
* returned, else IO can hang.
- *
- * Called with host_lock held.
*/
static inline int scsi_host_queue_ready(struct request_queue *q,
struct Scsi_Host *shost,
struct scsi_device *sdev)
{
+ unsigned int busy;
+
if (scsi_host_in_recovery(shost))
return 0;
- if (shost->host_busy == 0 && shost->host_blocked) {
+
+ busy = atomic_inc_return(&shost->host_busy) - 1;
+ if (atomic_read(&shost->host_blocked) > 0) {
+ if (busy)
+ goto starved;
+
/*
* unblock after host_blocked iterates to zero
*/
- if (--shost->host_blocked == 0) {
- SCSI_LOG_MLQUEUE(3,
- printk("scsi%d unblocking host at zero depth\n",
- shost->host_no));
- } else {
- return 0;
- }
- }
- if (scsi_host_is_busy(shost)) {
- if (list_empty(&sdev->starved_entry))
- list_add_tail(&sdev->starved_entry, &shost->starved_list);
- return 0;
+ if (atomic_dec_return(&shost->host_blocked) > 0)
+ goto out_dec;
+
+ SCSI_LOG_MLQUEUE(3,
+ shost_printk(KERN_INFO, shost,
+ "unblocking host at zero depth\n"));
}
+ if (shost->can_queue > 0 && busy >= shost->can_queue)
+ goto starved;
+ if (shost->host_self_blocked)
+ goto starved;
+
/* We're OK to process the command, so we can't be starved */
- if (!list_empty(&sdev->starved_entry))
- list_del_init(&sdev->starved_entry);
+ if (!list_empty(&sdev->starved_entry)) {
+ spin_lock_irq(shost->host_lock);
+ if (!list_empty(&sdev->starved_entry))
+ list_del_init(&sdev->starved_entry);
+ spin_unlock_irq(shost->host_lock);
+ }
return 1;
+
+starved:
+ spin_lock_irq(shost->host_lock);
+ if (list_empty(&sdev->starved_entry))
+ list_add_tail(&sdev->starved_entry, &shost->starved_list);
+ spin_unlock_irq(shost->host_lock);
+out_dec:
+ atomic_dec(&shost->host_busy);
+ return 0;
}
/*
@@ -1430,13 +1587,10 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
* bump busy counts. To bump the counters, we need to dance
* with the locks as normal issue path does.
*/
- sdev->device_busy++;
- spin_unlock(sdev->request_queue->queue_lock);
- spin_lock(shost->host_lock);
- shost->host_busy++;
- starget->target_busy++;
- spin_unlock(shost->host_lock);
- spin_lock(sdev->request_queue->queue_lock);
+ atomic_inc(&sdev->device_busy);
+ atomic_inc(&shost->host_busy);
+ if (starget->can_queue > 0)
+ atomic_inc(&starget->target_busy);
blk_complete_request(req);
}
@@ -1461,7 +1615,7 @@ static void scsi_softirq_done(struct request *rq)
wait_for/HZ);
disposition = SUCCESS;
}
-
+
scsi_log_completion(cmd, disposition);
switch (disposition) {
@@ -1480,6 +1634,23 @@ static void scsi_softirq_done(struct request *rq)
}
}
+/**
+ * scsi_done - Invoke completion on finished SCSI command.
+ * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
+ * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
+ *
+ * Description: This function is the mid-level's (SCSI Core) interrupt routine,
+ * which regains ownership of the SCSI command (de facto) from an LLDD, and
+ * calls blk_complete_request() for further processing.
+ *
+ * This function is interrupt context safe.
+ */
+static void scsi_done(struct scsi_cmnd *cmd)
+{
+ trace_scsi_dispatch_cmd_done(cmd);
+ blk_complete_request(cmd->request);
+}
+
/*
* Function: scsi_request_fn()
*
@@ -1509,11 +1680,11 @@ static void scsi_request_fn(struct request_queue *q)
int rtn;
/*
* get next queueable request. We do this early to make sure
- * that the request is fully prepared even if we cannot
+ * that the request is fully prepared even if we cannot
* accept it.
*/
req = blk_peek_request(q);
- if (!req || !scsi_dev_queue_ready(q, sdev))
+ if (!req)
break;
if (unlikely(!scsi_device_online(sdev))) {
@@ -1523,15 +1694,16 @@ static void scsi_request_fn(struct request_queue *q)
continue;
}
+ if (!scsi_dev_queue_ready(q, sdev))
+ break;
/*
* Remove the request from the request list.
*/
if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
blk_start_request(req);
- sdev->device_busy++;
- spin_unlock(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
cmd = req->special;
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n"
@@ -1541,7 +1713,6 @@ static void scsi_request_fn(struct request_queue *q)
blk_dump_rq_flags(req, "foo");
BUG();
}
- spin_lock(shost->host_lock);
/*
* We hit this when the driver is using a host wide
@@ -1552,9 +1723,11 @@ static void scsi_request_fn(struct request_queue *q)
* a run when a tag is freed.
*/
if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
+ spin_lock_irq(shost->host_lock);
if (list_empty(&sdev->starved_entry))
list_add_tail(&sdev->starved_entry,
&shost->starved_list);
+ spin_unlock_irq(shost->host_lock);
goto not_ready;
}
@@ -1562,16 +1735,7 @@ static void scsi_request_fn(struct request_queue *q)
goto not_ready;
if (!scsi_host_queue_ready(q, shost, sdev))
- goto not_ready;
-
- scsi_target(sdev)->target_busy++;
- shost->host_busy++;
-
- /*
- * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
- * take the lock again.
- */
- spin_unlock_irq(shost->host_lock);
+ goto host_not_ready;
/*
* Finally, initialize any error handling parameters, and set up
@@ -1582,17 +1746,22 @@ static void scsi_request_fn(struct request_queue *q)
/*
* Dispatch the command to the low-level driver.
*/
+ cmd->scsi_done = scsi_done;
rtn = scsi_dispatch_cmd(cmd);
- spin_lock_irq(q->queue_lock);
- if (rtn)
+ if (rtn) {
+ scsi_queue_insert(cmd, rtn);
+ spin_lock_irq(q->queue_lock);
goto out_delay;
+ }
+ spin_lock_irq(q->queue_lock);
}
return;
+ host_not_ready:
+ if (scsi_target(sdev)->can_queue > 0)
+ atomic_dec(&scsi_target(sdev)->target_busy);
not_ready:
- spin_unlock_irq(shost->host_lock);
-
/*
* lock q, handle tag, requeue req, and decrement device_busy. We
* must return with queue_lock held.
@@ -1603,13 +1772,187 @@ static void scsi_request_fn(struct request_queue *q)
*/
spin_lock_irq(q->queue_lock);
blk_requeue_request(q, req);
- sdev->device_busy--;
+ atomic_dec(&sdev->device_busy);
out_delay:
- if (sdev->device_busy == 0)
+ if (atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
blk_delay_queue(q, SCSI_QUEUE_DELAY);
}
-u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
+static inline int prep_to_mq(int ret)
+{
+ switch (ret) {
+ case BLKPREP_OK:
+ return 0;
+ case BLKPREP_DEFER:
+ return BLK_MQ_RQ_QUEUE_BUSY;
+ default:
+ return BLK_MQ_RQ_QUEUE_ERROR;
+ }
+}
+
+static int scsi_mq_prep_fn(struct request *req)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ struct scsi_device *sdev = req->q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ unsigned char *sense_buf = cmd->sense_buffer;
+ struct scatterlist *sg;
+
+ memset(cmd, 0, sizeof(struct scsi_cmnd));
+
+ req->special = cmd;
+
+ cmd->request = req;
+ cmd->device = sdev;
+ cmd->sense_buffer = sense_buf;
+
+ cmd->tag = req->tag;
+
+ req->cmd = req->__cmd;
+ cmd->cmnd = req->cmd;
+ cmd->prot_op = SCSI_PROT_NORMAL;
+
+ INIT_LIST_HEAD(&cmd->list);
+ INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
+ cmd->jiffies_at_alloc = jiffies;
+
+ /*
+ * XXX: cmd_list lookups are only used by two drivers, try to get
+ * rid of this list in common code.
+ */
+ spin_lock_irq(&sdev->list_lock);
+ list_add_tail(&cmd->list, &sdev->cmd_list);
+ spin_unlock_irq(&sdev->list_lock);
+
+ sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
+ cmd->sdb.table.sgl = sg;
+
+ if (scsi_host_get_prot(shost)) {
+ cmd->prot_sdb = (void *)sg +
+ shost->sg_tablesize * sizeof(struct scatterlist);
+ memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
+
+ cmd->prot_sdb->table.sgl =
+ (struct scatterlist *)(cmd->prot_sdb + 1);
+ }
+
+ if (blk_bidi_rq(req)) {
+ struct request *next_rq = req->next_rq;
+ struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);
+
+ memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
+ bidi_sdb->table.sgl =
+ (struct scatterlist *)(bidi_sdb + 1);
+
+ next_rq->special = bidi_sdb;
+ }
+
+ return scsi_setup_cmnd(sdev, req);
+}
+
+static void scsi_mq_done(struct scsi_cmnd *cmd)
+{
+ trace_scsi_dispatch_cmd_done(cmd);
+ blk_mq_complete_request(cmd->request);
+}
+
+static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+{
+ struct request_queue *q = req->q;
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost = sdev->host;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
+ int ret;
+ int reason;
+
+ ret = prep_to_mq(scsi_prep_state_check(sdev, req));
+ if (ret)
+ goto out;
+
+ ret = BLK_MQ_RQ_QUEUE_BUSY;
+ if (!get_device(&sdev->sdev_gendev))
+ goto out;
+
+ if (!scsi_dev_queue_ready(q, sdev))
+ goto out_put_device;
+ if (!scsi_target_queue_ready(shost, sdev))
+ goto out_dec_device_busy;
+ if (!scsi_host_queue_ready(q, shost, sdev))
+ goto out_dec_target_busy;
+
+ if (!(req->cmd_flags & REQ_DONTPREP)) {
+ ret = prep_to_mq(scsi_mq_prep_fn(req));
+ if (ret)
+ goto out_dec_host_busy;
+ req->cmd_flags |= REQ_DONTPREP;
+ }
+
+ scsi_init_cmd_errh(cmd);
+ cmd->scsi_done = scsi_mq_done;
+
+ reason = scsi_dispatch_cmd(cmd);
+ if (reason) {
+ scsi_set_blocked(cmd, reason);
+ ret = BLK_MQ_RQ_QUEUE_BUSY;
+ goto out_dec_host_busy;
+ }
+
+ return BLK_MQ_RQ_QUEUE_OK;
+
+out_dec_host_busy:
+ atomic_dec(&shost->host_busy);
+out_dec_target_busy:
+ if (scsi_target(sdev)->can_queue > 0)
+ atomic_dec(&scsi_target(sdev)->target_busy);
+out_dec_device_busy:
+ atomic_dec(&sdev->device_busy);
+out_put_device:
+ put_device(&sdev->sdev_gendev);
+out:
+ switch (ret) {
+ case BLK_MQ_RQ_QUEUE_BUSY:
+ blk_mq_stop_hw_queue(hctx);
+ if (atomic_read(&sdev->device_busy) == 0 &&
+ !scsi_device_blocked(sdev))
+ blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
+ break;
+ case BLK_MQ_RQ_QUEUE_ERROR:
+ /*
+ * Make sure to release all allocated resources when
+ * we hit an error, as we will never see this command
+ * again.
+ */
+ if (req->cmd_flags & REQ_DONTPREP)
+ scsi_mq_uninit_cmd(cmd);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int scsi_init_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx,
+ unsigned int numa_node)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
+ numa_node);
+ if (!cmd->sense_buffer)
+ return -ENOMEM;
+ return 0;
+}
+
+static void scsi_exit_request(void *data, struct request *rq,
+ unsigned int hctx_idx, unsigned int request_idx)
+{
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+ kfree(cmd->sense_buffer);
+}
+
+static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
struct device *host_dev;
u64 bounce_limit = 0xffffffff;
@@ -1629,18 +1972,11 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
return bounce_limit;
}
-EXPORT_SYMBOL(scsi_calculate_bounce_limit);
-struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
- request_fn_proc *request_fn)
+static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
- struct request_queue *q;
struct device *dev = shost->dma_dev;
- q = blk_init_queue(request_fn, NULL);
- if (!q)
- return NULL;
-
/*
* this limit is imposed by hardware restrictions
*/
@@ -1671,7 +2007,17 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
* blk_queue_update_dma_alignment() later.
*/
blk_queue_dma_alignment(q, 0x03);
+}
+struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
+ request_fn_proc *request_fn)
+{
+ struct request_queue *q;
+
+ q = blk_init_queue(request_fn, NULL);
+ if (!q)
+ return NULL;
+ __scsi_init_queue(shost, q);
return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);
@@ -1692,6 +2038,55 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
return q;
}
+static struct blk_mq_ops scsi_mq_ops = {
+ .map_queue = blk_mq_map_queue,
+ .queue_rq = scsi_queue_rq,
+ .complete = scsi_softirq_done,
+ .timeout = scsi_times_out,
+ .init_request = scsi_init_request,
+ .exit_request = scsi_exit_request,
+};
+
+struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
+{
+ sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
+ if (IS_ERR(sdev->request_queue))
+ return NULL;
+
+ sdev->request_queue->queuedata = sdev;
+ __scsi_init_queue(sdev->host, sdev->request_queue);
+ return sdev->request_queue;
+}
+
+int scsi_mq_setup_tags(struct Scsi_Host *shost)
+{
+ unsigned int cmd_size, sgl_size, tbl_size;
+
+ tbl_size = shost->sg_tablesize;
+ if (tbl_size > SCSI_MAX_SG_SEGMENTS)
+ tbl_size = SCSI_MAX_SG_SEGMENTS;
+ sgl_size = tbl_size * sizeof(struct scatterlist);
+ cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
+ if (scsi_host_get_prot(shost))
+ cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;
+
+ memset(&shost->tag_set, 0, sizeof(shost->tag_set));
+ shost->tag_set.ops = &scsi_mq_ops;
+ shost->tag_set.nr_hw_queues = 1;
+ shost->tag_set.queue_depth = shost->can_queue;
+ shost->tag_set.cmd_size = cmd_size;
+ shost->tag_set.numa_node = NUMA_NO_NODE;
+ shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+ shost->tag_set.driver_data = shost;
+
+ return blk_mq_alloc_tag_set(&shost->tag_set);
+}
+
+void scsi_mq_destroy_tags(struct Scsi_Host *shost)
+{
+ blk_mq_free_tag_set(&shost->tag_set);
+}
+
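/*
 * Illustrative sketch, not part of the patch above.  scsi_mq_setup_tags()
 * sizes one per-request PDU as: struct scsi_cmnd, then the LLD's private
 * area (hostt->cmd_size), then the data scatterlist, plus -- on
 * protection-capable hosts -- a second scsi_data_buffer and scatterlist;
 * scsi_mq_prep_fn() later carves the PDU up at exactly those offsets.
 * The stand-alone program below replays that arithmetic with made-up
 * structure sizes (the real sizeofs are kernel- and arch-dependent).
 */
#include <stdio.h>

#define SIZEOF_SCSI_CMND        344   /* hypothetical sizeof(struct scsi_cmnd) */
#define SIZEOF_SCATTERLIST       32   /* hypothetical sizeof(struct scatterlist) */
#define SIZEOF_SCSI_DATA_BUFFER  40   /* hypothetical sizeof(struct scsi_data_buffer) */
#define SCSI_MAX_SG_SEGMENTS    128

static unsigned int mq_cmd_size(unsigned int sg_tablesize,
				unsigned int lld_cmd_size, int has_prot)
{
	unsigned int tbl_size = sg_tablesize;
	unsigned int sgl_size, cmd_size;

	if (tbl_size > SCSI_MAX_SG_SEGMENTS)
		tbl_size = SCSI_MAX_SG_SEGMENTS;
	sgl_size = tbl_size * SIZEOF_SCATTERLIST;

	/* scsi_cmnd + LLD private data + data scatterlist ... */
	cmd_size = SIZEOF_SCSI_CMND + lld_cmd_size + sgl_size;
	/* ... plus, for protection-capable hosts, an extra sdb + sgl */
	if (has_prot)
		cmd_size += SIZEOF_SCSI_DATA_BUFFER + sgl_size;
	return cmd_size;
}

int main(void)
{
	printf("cmd_size without protection: %u bytes\n", mq_cmd_size(64, 96, 0));
	printf("cmd_size with protection:    %u bytes\n", mq_cmd_size(64, 96, 1));
	return 0;
}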
/*
* Function: scsi_block_requests()
*
@@ -2147,9 +2542,9 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
return 0;
illegal:
- SCSI_LOG_ERROR_RECOVERY(1,
+ SCSI_LOG_ERROR_RECOVERY(1,
sdev_printk(KERN_ERR, sdev,
- "Illegal state transition %s->%s\n",
+ "Illegal state transition %s->%s",
scsi_device_state_name(oldstate),
scsi_device_state_name(state))
);
@@ -2345,7 +2740,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
return err;
scsi_run_queue(sdev->request_queue);
- while (sdev->device_busy) {
+ while (atomic_read(&sdev->device_busy)) {
msleep_interruptible(200);
scsi_run_queue(sdev->request_queue);
}
@@ -2437,9 +2832,13 @@ scsi_internal_device_block(struct scsi_device *sdev)
* block layer from calling the midlayer with this device's
* request queue.
*/
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (q->mq_ops) {
+ blk_mq_stop_hw_queues(q);
+ } else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
return 0;
}
@@ -2485,9 +2884,13 @@ scsi_internal_device_unblock(struct scsi_device *sdev,
sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (q->mq_ops) {
+ blk_mq_start_stopped_hw_queues(q, false);
+ } else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
return 0;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 48e5b657e79f..12b8e1bee7f0 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -88,6 +88,9 @@ extern void scsi_next_command(struct scsi_cmnd *cmd);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
+extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
+extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
+extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
extern int scsi_init_queue(void);
extern void scsi_exit_queue(void);
struct request_queue;
@@ -115,7 +118,7 @@ extern void scsi_exit_procfs(void);
extern char scsi_scan_type[];
extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
- unsigned int, unsigned int, int);
+ unsigned int, u64, int);
extern void scsi_forget_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index 86f0c5d5c116..6fcefa2da503 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -185,7 +185,7 @@ static int proc_print_scsidevice(struct device *dev, void *data)
sdev = to_scsi_device(dev);
seq_printf(s,
- "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n Vendor: ",
+ "Host: scsi%d Channel: %02d Id: %02d Lun: %02llu\n Vendor: ",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
for (i = 0; i < 8; i++) {
if (sdev->vendor[i] >= 0x20)
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e02b3aab56ce..56675dbbf681 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -81,15 +81,11 @@ static const char *scsi_null_device_strs = "nullnullnullnull";
#define MAX_SCSI_LUNS 512
-#ifdef CONFIG_SCSI_MULTI_LUN
-static unsigned int max_scsi_luns = MAX_SCSI_LUNS;
-#else
-static unsigned int max_scsi_luns = 1;
-#endif
+static u64 max_scsi_luns = MAX_SCSI_LUNS;
-module_param_named(max_luns, max_scsi_luns, uint, S_IRUGO|S_IWUSR);
+module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(max_luns,
- "last scsi LUN (should be between 1 and 2^32-1)");
+ "last scsi LUN (should be between 1 and 2^64-1)");
#ifdef CONFIG_SCSI_SCAN_ASYNC
#define SCSI_SCAN_TYPE_DEFAULT "async"
@@ -198,7 +194,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
{
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
- printk(KERN_NOTICE "scsi: unlocking floptical drive\n");
+ sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
scsi_cmd[0] = MODE_SENSE;
scsi_cmd[1] = 0;
scsi_cmd[2] = 0x2e;
@@ -224,7 +220,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
* scsi_Device pointer, or NULL on failure.
**/
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
- unsigned int lun, void *hostdata)
+ u64 lun, void *hostdata)
{
struct scsi_device *sdev;
int display_failure_msg = 1, ret;
@@ -277,7 +273,10 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
*/
sdev->borken = 1;
- sdev->request_queue = scsi_alloc_queue(sdev);
+ if (shost_use_blk_mq(shost))
+ sdev->request_queue = scsi_mq_alloc_queue(sdev);
+ else
+ sdev->request_queue = scsi_alloc_queue(sdev);
if (!sdev->request_queue) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
@@ -600,8 +599,8 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
HZ / 2 + HZ * scsi_inq_timeout, 3,
&resid);
- SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: INQUIRY %s "
- "with code 0x%x\n",
+ SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
+ "scsi scan: INQUIRY %s with code 0x%x\n",
result ? "failed" : "successful", result));
if (result) {
@@ -671,9 +670,10 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
} else if (pass == 2) {
- printk(KERN_INFO "scsi scan: %d byte inquiry failed. "
- "Consider BLIST_INQUIRY_36 for this device\n",
- try_inquiry_len);
+ sdev_printk(KERN_INFO, sdev,
+ "scsi scan: %d byte inquiry failed. "
+ "Consider BLIST_INQUIRY_36 for this device\n",
+ try_inquiry_len);
/* If this pass failed, the third pass goes back and transfers
* the same amount as we successfully got in the first pass. */
@@ -706,8 +706,9 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* strings.
*/
if (sdev->inquiry_len < 36) {
- printk(KERN_INFO "scsi scan: INQUIRY result too short (%d),"
- " using 36\n", sdev->inquiry_len);
+ sdev_printk(KERN_INFO, sdev,
+ "scsi scan: INQUIRY result too short (%d),"
+ " using 36\n", sdev->inquiry_len);
sdev->inquiry_len = 36;
}
@@ -806,29 +807,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->removable = (inq_result[1] & 0x80) >> 7;
}
- switch (sdev->type) {
- case TYPE_RBC:
- case TYPE_TAPE:
- case TYPE_DISK:
- case TYPE_PRINTER:
- case TYPE_MOD:
- case TYPE_PROCESSOR:
- case TYPE_SCANNER:
- case TYPE_MEDIUM_CHANGER:
- case TYPE_ENCLOSURE:
- case TYPE_COMM:
- case TYPE_RAID:
- case TYPE_OSD:
- sdev->writeable = 1;
- break;
- case TYPE_ROM:
- case TYPE_WORM:
- sdev->writeable = 0;
- break;
- default:
- printk(KERN_INFO "scsi: unknown device type %d\n", sdev->type);
- }
-
if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
/* RBC and MMC devices can return SCSI-3 compliance and yet
* still not support REPORT LUNS, so make them act as
@@ -922,6 +900,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_USE_10_BYTE_MS)
sdev->use_10_for_ms = 1;
+ /* some devices don't like REPORT SUPPORTED OPERATION CODES
+ * and will simply timeout causing sd_mod init to take a very
+ * very long time */
+ if (*bflags & BLIST_NO_RSOC)
+ sdev->no_report_opcodes = 1;
+
/* set the device running here so that slave configure
* may do I/O */
ret = scsi_device_set_state(sdev, SDEV_RUNNING);
@@ -950,7 +934,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
- if (*bflags & BLIST_SKIP_VPD_PAGES)
+ if (*bflags & BLIST_TRY_VPD_PAGES)
+ sdev->try_vpd_pages = 1;
+ else if (*bflags & BLIST_SKIP_VPD_PAGES)
sdev->skip_vpd_pages = 1;
transport_configure_device(&sdev->sdev_gendev);
@@ -1032,7 +1018,7 @@ static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
* SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
**/
static int scsi_probe_and_add_lun(struct scsi_target *starget,
- uint lun, int *bflagsp,
+ u64 lun, int *bflagsp,
struct scsi_device **sdevp, int rescan,
void *hostdata)
{
@@ -1048,7 +1034,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
sdev = scsi_device_lookup_by_target(starget, lun);
if (sdev) {
if (rescan || !scsi_device_created(sdev)) {
- SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
+ SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: device exists on %s\n",
dev_name(&sdev->sdev_gendev)));
if (sdevp)
@@ -1135,7 +1121,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
(result[0] & 0x1f) == 0x1f &&
!scsi_is_wlun(lun)) {
- SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO
+ SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: peripheral device type"
" of 31, no device added\n"));
res = SCSI_SCAN_TARGET_PRESENT;
@@ -1185,11 +1171,12 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
static void scsi_sequential_lun_scan(struct scsi_target *starget,
int bflags, int scsi_level, int rescan)
{
- unsigned int sparse_lun, lun, max_dev_lun;
+ uint max_dev_lun;
+ u64 sparse_lun, lun;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
- SCSI_LOG_SCAN_BUS(3, printk(KERN_INFO "scsi scan: Sequential scan of"
- "%s\n", dev_name(&starget->dev)));
+ SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
+ "scsi scan: Sequential scan\n"));
max_dev_lun = min(max_scsi_luns, shost->max_lun);
/*
@@ -1239,6 +1226,12 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
max_dev_lun = min(8U, max_dev_lun);
/*
+ * Stop scanning at 255 unless BLIST_SCSI3LUN
+ */
+ if (!(bflags & BLIST_SCSI3LUN))
+ max_dev_lun = min(256U, max_dev_lun);
+
+ /*
* We have already scanned LUN 0, so start at LUN 1. Keep scanning
* until we reach the max, or no LUN is found and we are not
* sparse_lun.
@@ -1260,24 +1253,25 @@ static void scsi_sequential_lun_scan(struct scsi_target *starget,
* truncation before using this function.
*
* Notes:
- * The struct scsi_lun is assumed to be four levels, with each level
- * effectively containing a SCSI byte-ordered (big endian) short; the
- * addressing bits of each level are ignored (the highest two bits).
* For a description of the LUN format, post SCSI-3 see the SCSI
* Architecture Model, for SCSI-3 see the SCSI Controller Commands.
*
- * Given a struct scsi_lun of: 0a 04 0b 03 00 00 00 00, this function returns
- * the integer: 0x0b030a04
+ * Given a struct scsi_lun of: d2 04 0b 03 00 00 00 00, this function
+ * returns the integer: 0x0b03d204
+ *
+ * This encoding will return a standard integer LUN for LUNs smaller
+ * than 256, which typically use a single level LUN structure with
+ * addressing method 0.
**/
-int scsilun_to_int(struct scsi_lun *scsilun)
+u64 scsilun_to_int(struct scsi_lun *scsilun)
{
int i;
- unsigned int lun;
+ u64 lun;
lun = 0;
for (i = 0; i < sizeof(lun); i += 2)
- lun = lun | (((scsilun->scsi_lun[i] << 8) |
- scsilun->scsi_lun[i + 1]) << (i * 8));
+ lun = lun | (((u64)scsilun->scsi_lun[i] << ((i + 1) * 8)) |
+ ((u64)scsilun->scsi_lun[i + 1] << (i * 8)));
return lun;
}
EXPORT_SYMBOL(scsilun_to_int);
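/*
 * Illustrative sketch, not part of the patch above.  The new 64-bit
 * scsilun_to_int() places each big-endian 2-byte LUN level n into bits
 * 16*n..16*n+15 of the integer LUN.  The stand-alone program below
 * replays that conversion for the example from the kerneldoc comment:
 * wire bytes d2 04 0b 03 00 00 00 00 become 0x0b03d204.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t scsilun_to_int_sketch(const uint8_t scsi_lun[8])
{
	uint64_t lun = 0;
	int i;

	for (i = 0; i < 8; i += 2)
		lun |= ((uint64_t)scsi_lun[i] << ((i + 1) * 8)) |
		       ((uint64_t)scsi_lun[i + 1] << (i * 8));
	return lun;
}

int main(void)
{
	const uint8_t wire[8] = { 0xd2, 0x04, 0x0b, 0x03, 0, 0, 0, 0 };

	/* prints 0xb03d204, matching the kerneldoc example */
	printf("0x%llx\n", (unsigned long long)scsilun_to_int_sketch(wire));
	return 0;
}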
@@ -1291,16 +1285,13 @@ EXPORT_SYMBOL(scsilun_to_int);
* Reverts the functionality of the scsilun_to_int, which packed
* an 8-byte lun value into an int. This routine unpacks the int
* back into the lun value.
- * Note: the scsilun_to_int() routine does not truly handle all
- * 8bytes of the lun value. This functions restores only as much
- * as was set by the routine.
*
* Notes:
- * Given an integer : 0x0b030a04, this function returns a
- * scsi_lun of : struct scsi_lun of: 0a 04 0b 03 00 00 00 00
+ * Given an integer : 0x0b03d204, this function returns a
+ * struct scsi_lun of: d2 04 0b 03 00 00 00 00
*
**/
-void int_to_scsilun(unsigned int lun, struct scsi_lun *scsilun)
+void int_to_scsilun(u64 lun, struct scsi_lun *scsilun)
{
int i;
@@ -1340,7 +1331,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
char devname[64];
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
unsigned int length;
- unsigned int lun;
+ u64 lun;
unsigned int num_luns;
unsigned int retries;
int result;
@@ -1430,17 +1421,19 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
* a retry.
*/
for (retries = 0; retries < 3; retries++) {
- SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: Sending"
- " REPORT LUNS to %s (try %d)\n", devname,
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: Sending REPORT LUNS to (try %d)\n",
retries));
result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
lun_data, length, &sshdr,
SCSI_TIMEOUT + 4 * HZ, 3, NULL);
- SCSI_LOG_SCAN_BUS(3, printk (KERN_INFO "scsi scan: REPORT LUNS"
- " %s (try %d) result 0x%x\n", result
- ? "failed" : "successful", retries, result));
+ SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
+ "scsi scan: REPORT LUNS"
+ " %s (try %d) result 0x%x\n",
+ result ? "failed" : "successful",
+ retries, result));
if (result == 0)
break;
else if (scsi_sense_valid(&sshdr)) {
@@ -1466,10 +1459,11 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
num_luns = (length / sizeof(struct scsi_lun));
if (num_luns > max_scsi_report_luns) {
- printk(KERN_WARNING "scsi: On %s only %d (max_scsi_report_luns)"
- " of %d luns reported, try increasing"
- " max_scsi_report_luns.\n", devname,
- max_scsi_report_luns, num_luns);
+ sdev_printk(KERN_WARNING, sdev,
+ "Only %d (max_scsi_report_luns)"
+ " of %d luns reported, try increasing"
+ " max_scsi_report_luns.\n",
+ max_scsi_report_luns, num_luns);
num_luns = max_scsi_report_luns;
}
@@ -1483,27 +1477,10 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
lun = scsilun_to_int(lunp);
- /*
- * Check if the unused part of lunp is non-zero, and so
- * does not fit in lun.
- */
- if (memcmp(&lunp->scsi_lun[sizeof(lun)], "\0\0\0\0", 4)) {
- int i;
-
- /*
- * Output an error displaying the LUN in byte order,
- * this differs from what linux would print for the
- * integer LUN value.
- */
- printk(KERN_WARNING "scsi: %s lun 0x", devname);
- data = (char *)lunp->scsi_lun;
- for (i = 0; i < sizeof(struct scsi_lun); i++)
- printk("%02x", data[i]);
- printk(" has a LUN larger than currently supported.\n");
- } else if (lun > sdev->host->max_lun) {
- printk(KERN_WARNING "scsi: %s lun%d has a LUN larger"
- " than allowed by the host adapter\n",
- devname, lun);
+ if (lun > sdev->host->max_lun) {
+ sdev_printk(KERN_WARNING, sdev,
+ "lun%llu has a LUN larger than"
+ " allowed by the host adapter\n", lun);
} else {
int res;
@@ -1515,8 +1492,8 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
*/
sdev_printk(KERN_ERR, sdev,
"Unexpected response"
- " from lun %d while scanning, scan"
- " aborted\n", lun);
+ " from lun %llu while scanning, scan"
+ " aborted\n", (unsigned long long)lun);
break;
}
}
@@ -1535,7 +1512,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
}
struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
- uint id, uint lun, void *hostdata)
+ uint id, u64 lun, void *hostdata)
{
struct scsi_device *sdev = ERR_PTR(-ENODEV);
struct device *parent = &shost->shost_gendev;
@@ -1571,7 +1548,7 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
EXPORT_SYMBOL(__scsi_add_device);
int scsi_add_device(struct Scsi_Host *host, uint channel,
- uint target, uint lun)
+ uint target, u64 lun)
{
struct scsi_device *sdev =
__scsi_add_device(host, channel, target, lun, NULL);
@@ -1600,7 +1577,7 @@ void scsi_rescan_device(struct device *dev)
EXPORT_SYMBOL(scsi_rescan_device);
static void __scsi_scan_target(struct device *parent, unsigned int channel,
- unsigned int id, unsigned int lun, int rescan)
+ unsigned int id, u64 lun, int rescan)
{
struct Scsi_Host *shost = dev_to_shost(parent);
int bflags = 0;
@@ -1668,7 +1645,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
* sequential scan of LUNs on the target id.
**/
void scsi_scan_target(struct device *parent, unsigned int channel,
- unsigned int id, unsigned int lun, int rescan)
+ unsigned int id, u64 lun, int rescan)
{
struct Scsi_Host *shost = dev_to_shost(parent);
@@ -1688,7 +1665,7 @@ void scsi_scan_target(struct device *parent, unsigned int channel,
EXPORT_SYMBOL(scsi_scan_target);
static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
- unsigned int id, unsigned int lun, int rescan)
+ unsigned int id, u64 lun, int rescan)
{
uint order_id;
@@ -1719,10 +1696,10 @@ static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
}
int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
- unsigned int id, unsigned int lun, int rescan)
+ unsigned int id, u64 lun, int rescan)
{
SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
- "%s: <%u:%u:%u>\n",
+ "%s: <%u:%u:%llu>\n",
__func__, channel, id, lun));
if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
@@ -1781,8 +1758,7 @@ static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
return NULL;
if (shost->async_scan) {
- printk("%s called twice for host %d", __func__,
- shost->host_no);
+ shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
dump_stack();
return NULL;
}
@@ -1835,8 +1811,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
mutex_lock(&shost->scan_mutex);
if (!shost->async_scan) {
- printk("%s called twice for host %d", __func__,
- shost->host_no);
+ shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
dump_stack();
mutex_unlock(&shost->scan_mutex);
return;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 074e8cc30955..8b4105a22ac2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -80,7 +80,7 @@ const char *scsi_host_state_name(enum scsi_host_state state)
return name;
}
-static int check_set(unsigned int *val, char *src)
+static int check_set(unsigned long long *val, char *src)
{
char *last;
@@ -90,7 +90,7 @@ static int check_set(unsigned int *val, char *src)
/*
* Doesn't check for int overflow
*/
- *val = simple_strtoul(src, &last, 0);
+ *val = simple_strtoull(src, &last, 0);
if (*last != '\0')
return 1;
}
@@ -99,11 +99,11 @@ static int check_set(unsigned int *val, char *src)
static int scsi_scan(struct Scsi_Host *shost, const char *str)
{
- char s1[15], s2[15], s3[15], junk;
- unsigned int channel, id, lun;
+ char s1[15], s2[15], s3[17], junk;
+ unsigned long long channel, id, lun;
int res;
- res = sscanf(str, "%10s %10s %10s %c", s1, s2, s3, &junk);
+ res = sscanf(str, "%10s %10s %16s %c", s1, s2, s3, &junk);
if (res != 3)
return -EINVAL;
if (check_set(&channel, s1))
@@ -333,8 +333,8 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
+shost_rd_attr(use_blk_mq, "%d\n");
shost_rd_attr(unique_id, "%u\n");
-shost_rd_attr(host_busy, "%hu\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
shost_rd_attr(can_queue, "%hd\n");
shost_rd_attr(sg_tablesize, "%hu\n");
@@ -344,7 +344,16 @@ shost_rd_attr(prot_capabilities, "%u\n");
shost_rd_attr(prot_guard_type, "%hd\n");
shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
+static ssize_t
+show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy));
+}
+static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
+
static struct attribute *scsi_sysfs_shost_attrs[] = {
+ &dev_attr_use_blk_mq.attr,
&dev_attr_unique_id.attr,
&dev_attr_host_busy.attr,
&dev_attr_cmd_per_lun.attr,
@@ -577,14 +586,30 @@ static int scsi_sdev_check_buf_bit(const char *buf)
/*
* Create the actual show/store functions and data structures.
*/
-sdev_rd_attr (device_blocked, "%d\n");
-sdev_rd_attr (device_busy, "%d\n");
sdev_rd_attr (type, "%d\n");
sdev_rd_attr (scsi_level, "%d\n");
sdev_rd_attr (vendor, "%.8s\n");
sdev_rd_attr (model, "%.16s\n");
sdev_rd_attr (rev, "%.4s\n");
+static ssize_t
+sdev_show_device_busy(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_busy));
+}
+static DEVICE_ATTR(device_busy, S_IRUGO, sdev_show_device_busy, NULL);
+
+static ssize_t
+sdev_show_device_blocked(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ return snprintf(buf, 20, "%d\n", atomic_read(&sdev->device_blocked));
+}
+static DEVICE_ATTR(device_blocked, S_IRUGO, sdev_show_device_blocked, NULL);
+
/*
* TODO: can we make these symlinks to the block layer ones?
*/
@@ -885,9 +910,9 @@ sdev_store_queue_ramp_up_period(struct device *dev,
const char *buf, size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
- unsigned long period;
+ unsigned int period;
- if (strict_strtoul(buf, 10, &period))
+ if (kstrtouint(buf, 10, &period))
return -EINVAL;
sdev->queue_ramp_up_period = msecs_to_jiffies(period);
@@ -1230,13 +1255,13 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
device_initialize(&sdev->sdev_gendev);
sdev->sdev_gendev.bus = &scsi_bus_type;
sdev->sdev_gendev.type = &scsi_dev_type;
- dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%d",
+ dev_set_name(&sdev->sdev_gendev, "%d:%d:%d:%llu",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
device_initialize(&sdev->sdev_dev);
sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev);
sdev->sdev_dev.class = &sdev_class;
- dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d",
+ dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu",
sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
sdev->scsi_level = starget->scsi_level;
transport_setup_device(&sdev->sdev_gendev);
diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c
deleted file mode 100644
index 6209110f295d..000000000000
--- a/drivers/scsi/scsi_tgt_if.c
+++ /dev/null
@@ -1,399 +0,0 @@
-/*
- * SCSI target kernel/user interface functions
- *
- * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
- * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-#include <linux/miscdevice.h>
-#include <linux/gfp.h>
-#include <linux/file.h>
-#include <linux/export.h>
-#include <net/tcp.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tgt.h>
-#include <scsi/scsi_tgt_if.h>
-
-#include <asm/cacheflush.h>
-
-#include "scsi_tgt_priv.h"
-
-#if TGT_RING_SIZE < PAGE_SIZE
-# define TGT_RING_SIZE PAGE_SIZE
-#endif
-
-#define TGT_RING_PAGES (TGT_RING_SIZE >> PAGE_SHIFT)
-#define TGT_EVENT_PER_PAGE (PAGE_SIZE / sizeof(struct tgt_event))
-#define TGT_MAX_EVENTS (TGT_EVENT_PER_PAGE * TGT_RING_PAGES)
-
-struct tgt_ring {
- u32 tr_idx;
- unsigned long tr_pages[TGT_RING_PAGES];
- spinlock_t tr_lock;
-};
-
-/* tx_ring : kernel->user, rx_ring : user->kernel */
-static struct tgt_ring tx_ring, rx_ring;
-static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
-
-static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
-{
- if (ring->tr_idx == TGT_MAX_EVENTS - 1)
- ring->tr_idx = 0;
- else
- ring->tr_idx++;
-}
-
-static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
-{
- u32 pidx, off;
-
- pidx = idx / TGT_EVENT_PER_PAGE;
- off = idx % TGT_EVENT_PER_PAGE;
-
- return (struct tgt_event *)
- (ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
-}
-
-static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
-{
- struct tgt_event *ev;
- struct tgt_ring *ring = &tx_ring;
- unsigned long flags;
- int err = 0;
-
- spin_lock_irqsave(&ring->tr_lock, flags);
-
- ev = tgt_head_event(ring, ring->tr_idx);
- if (!ev->hdr.status)
- tgt_ring_idx_inc(ring);
- else
- err = -BUSY;
-
- spin_unlock_irqrestore(&ring->tr_lock, flags);
-
- if (err)
- return err;
-
- memcpy(ev, p, sizeof(*ev));
- ev->hdr.type = type;
- mb();
- ev->hdr.status = 1;
-
- flush_dcache_page(virt_to_page(ev));
-
- wake_up_interruptible(&tgt_poll_wait);
-
- return 0;
-}
-
-int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, u64 itn_id,
- struct scsi_lun *lun, u64 tag)
-{
- struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
- struct tgt_event ev;
- int err;
-
- memset(&ev, 0, sizeof(ev));
- ev.p.cmd_req.host_no = shost->host_no;
- ev.p.cmd_req.itn_id = itn_id;
- ev.p.cmd_req.data_len = scsi_bufflen(cmd);
- memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
- memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
- ev.p.cmd_req.attribute = cmd->tag;
- ev.p.cmd_req.tag = tag;
-
- dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
- ev.p.cmd_req.data_len, cmd->tag,
- (unsigned long long) ev.p.cmd_req.tag);
-
- err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
- if (err)
- eprintk("tx buf is full, could not send\n");
-
- return err;
-}
-
-int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 itn_id, u64 tag)
-{
- struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
- struct tgt_event ev;
- int err;
-
- memset(&ev, 0, sizeof(ev));
- ev.p.cmd_done.host_no = shost->host_no;
- ev.p.cmd_done.itn_id = itn_id;
- ev.p.cmd_done.tag = tag;
- ev.p.cmd_done.result = cmd->result;
-
- dprintk("%p %d %llu %u %x\n", cmd, shost->host_no,
- (unsigned long long) ev.p.cmd_req.tag,
- ev.p.cmd_req.data_len, cmd->tag);
-
- err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
- if (err)
- eprintk("tx buf is full, could not send\n");
-
- return err;
-}
-
-int scsi_tgt_uspace_send_tsk_mgmt(int host_no, u64 itn_id, int function,
- u64 tag, struct scsi_lun *scsilun, void *data)
-{
- struct tgt_event ev;
- int err;
-
- memset(&ev, 0, sizeof(ev));
- ev.p.tsk_mgmt_req.host_no = host_no;
- ev.p.tsk_mgmt_req.itn_id = itn_id;
- ev.p.tsk_mgmt_req.function = function;
- ev.p.tsk_mgmt_req.tag = tag;
- memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
- ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;
-
- dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag,
- (unsigned long long) ev.p.tsk_mgmt_req.mid);
-
- err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
- if (err)
- eprintk("tx buf is full, could not send\n");
-
- return err;
-}
-
-int scsi_tgt_uspace_send_it_nexus_request(int host_no, u64 itn_id,
- int function, char *initiator_id)
-{
- struct tgt_event ev;
- int err;
-
- memset(&ev, 0, sizeof(ev));
- ev.p.it_nexus_req.host_no = host_no;
- ev.p.it_nexus_req.function = function;
- ev.p.it_nexus_req.itn_id = itn_id;
- if (initiator_id)
- strncpy(ev.p.it_nexus_req.initiator_id, initiator_id,
- sizeof(ev.p.it_nexus_req.initiator_id));
-
- dprintk("%d %x %llx\n", host_no, function, (unsigned long long)itn_id);
-
- err = tgt_uspace_send_event(TGT_KEVENT_IT_NEXUS_REQ, &ev);
- if (err)
- eprintk("tx buf is full, could not send\n");
-
- return err;
-}
-
-static int event_recv_msg(struct tgt_event *ev)
-{
- int err = 0;
-
- switch (ev->hdr.type) {
- case TGT_UEVENT_CMD_RSP:
- err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
- ev->p.cmd_rsp.itn_id,
- ev->p.cmd_rsp.result,
- ev->p.cmd_rsp.tag,
- ev->p.cmd_rsp.uaddr,
- ev->p.cmd_rsp.len,
- ev->p.cmd_rsp.sense_uaddr,
- ev->p.cmd_rsp.sense_len,
- ev->p.cmd_rsp.rw);
- break;
- case TGT_UEVENT_TSK_MGMT_RSP:
- err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
- ev->p.tsk_mgmt_rsp.itn_id,
- ev->p.tsk_mgmt_rsp.mid,
- ev->p.tsk_mgmt_rsp.result);
- break;
- case TGT_UEVENT_IT_NEXUS_RSP:
- err = scsi_tgt_kspace_it_nexus_rsp(ev->p.it_nexus_rsp.host_no,
- ev->p.it_nexus_rsp.itn_id,
- ev->p.it_nexus_rsp.result);
- break;
- default:
- eprintk("unknown type %d\n", ev->hdr.type);
- err = -EINVAL;
- }
-
- return err;
-}
-
-static ssize_t tgt_write(struct file *file, const char __user * buffer,
- size_t count, loff_t * ppos)
-{
- struct tgt_event *ev;
- struct tgt_ring *ring = &rx_ring;
-
- while (1) {
- ev = tgt_head_event(ring, ring->tr_idx);
- /* do we need this? */
- flush_dcache_page(virt_to_page(ev));
-
- if (!ev->hdr.status)
- break;
-
- tgt_ring_idx_inc(ring);
- event_recv_msg(ev);
- ev->hdr.status = 0;
- };
-
- return count;
-}
-
-static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait)
-{
- struct tgt_event *ev;
- struct tgt_ring *ring = &tx_ring;
- unsigned long flags;
- unsigned int mask = 0;
- u32 idx;
-
- poll_wait(file, &tgt_poll_wait, wait);
-
- spin_lock_irqsave(&ring->tr_lock, flags);
-
- idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
- ev = tgt_head_event(ring, idx);
- if (ev->hdr.status)
- mask |= POLLIN | POLLRDNORM;
-
- spin_unlock_irqrestore(&ring->tr_lock, flags);
-
- return mask;
-}
-
-static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
- struct tgt_ring *ring)
-{
- int i, err;
-
- for (i = 0; i < TGT_RING_PAGES; i++) {
- struct page *page = virt_to_page(ring->tr_pages[i]);
- err = vm_insert_page(vma, addr, page);
- if (err)
- return err;
- addr += PAGE_SIZE;
- }
-
- return 0;
-}
-
-static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- unsigned long addr;
- int err;
-
- if (vma->vm_pgoff)
- return -EINVAL;
-
- if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
- eprintk("mmap size must be %lu, not %lu \n",
- TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
- return -EINVAL;
- }
-
- addr = vma->vm_start;
- err = uspace_ring_map(vma, addr, &tx_ring);
- if (err)
- return err;
- err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);
-
- return err;
-}
-
-static int tgt_open(struct inode *inode, struct file *file)
-{
- tx_ring.tr_idx = rx_ring.tr_idx = 0;
-
- return 0;
-}
-
-static const struct file_operations tgt_fops = {
- .owner = THIS_MODULE,
- .open = tgt_open,
- .poll = tgt_poll,
- .write = tgt_write,
- .mmap = tgt_mmap,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice tgt_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "tgt",
- .fops = &tgt_fops,
-};
-
-static void tgt_ring_exit(struct tgt_ring *ring)
-{
- int i;
-
- for (i = 0; i < TGT_RING_PAGES; i++)
- free_page(ring->tr_pages[i]);
-}
-
-static int tgt_ring_init(struct tgt_ring *ring)
-{
- int i;
-
- spin_lock_init(&ring->tr_lock);
-
- for (i = 0; i < TGT_RING_PAGES; i++) {
- ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
- if (!ring->tr_pages[i]) {
- eprintk("out of memory\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-void scsi_tgt_if_exit(void)
-{
- tgt_ring_exit(&tx_ring);
- tgt_ring_exit(&rx_ring);
- misc_deregister(&tgt_miscdev);
-}
-
-int scsi_tgt_if_init(void)
-{
- int err;
-
- err = tgt_ring_init(&tx_ring);
- if (err)
- return err;
-
- err = tgt_ring_init(&rx_ring);
- if (err)
- goto free_tx_ring;
-
- err = misc_register(&tgt_miscdev);
- if (err)
- goto free_rx_ring;
-
- return 0;
-free_rx_ring:
- tgt_ring_exit(&rx_ring);
-free_tx_ring:
- tgt_ring_exit(&tx_ring);
-
- return err;
-}
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
deleted file mode 100644
index e51add05fb8d..000000000000
--- a/drivers/scsi/scsi_tgt_lib.c
+++ /dev/null
@@ -1,661 +0,0 @@
-/*
- * SCSI target lib functions
- *
- * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
- * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-#include <linux/blkdev.h>
-#include <linux/hash.h>
-#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_transport.h>
-#include <scsi/scsi_tgt.h>
-
-#include "scsi_tgt_priv.h"
-
-static struct workqueue_struct *scsi_tgtd;
-static struct kmem_cache *scsi_tgt_cmd_cache;
-
-/*
- * TODO: this struct will be killed when the block layer supports large bios
- * and James's work struct code is in
- */
-struct scsi_tgt_cmd {
- /* TODO replace work with James b's code */
- struct work_struct work;
- /* TODO fix limits of some drivers */
- struct bio *bio;
-
- struct list_head hash_list;
- struct request *rq;
- u64 itn_id;
- u64 tag;
-};
-
-#define TGT_HASH_ORDER 4
-#define cmd_hashfn(tag) hash_long((unsigned long) (tag), TGT_HASH_ORDER)
-
-struct scsi_tgt_queuedata {
- struct Scsi_Host *shost;
- struct list_head cmd_hash[1 << TGT_HASH_ORDER];
- spinlock_t cmd_hash_lock;
-};
-
-/*
- * Function: scsi_host_get_command()
- *
- * Purpose: Allocate and setup a scsi command block and blk request
- *
- * Arguments: shost - scsi host
- * data_dir - dma data dir
- * gfp_mask- allocator flags
- *
- * Returns: The allocated scsi command structure.
- *
- * This should be called by target LLDs to get a command.
- */
-struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
- enum dma_data_direction data_dir,
- gfp_t gfp_mask)
-{
- int write = (data_dir == DMA_TO_DEVICE);
- struct request *rq;
- struct scsi_cmnd *cmd;
- struct scsi_tgt_cmd *tcmd;
-
- /* Bail if we can't get a reference to the device */
- if (!get_device(&shost->shost_gendev))
- return NULL;
-
- tcmd = kmem_cache_alloc(scsi_tgt_cmd_cache, GFP_ATOMIC);
- if (!tcmd)
- goto put_dev;
-
- /*
- * The blk helpers are used to the READ/WRITE requests
- * transferring data from a initiator point of view. Since
- * we are in target mode we want the opposite.
- */
- rq = blk_get_request(shost->uspace_req_q, !write, gfp_mask);
- if (!rq)
- goto free_tcmd;
-
- cmd = __scsi_get_command(shost, gfp_mask);
- if (!cmd)
- goto release_rq;
-
- cmd->sc_data_direction = data_dir;
- cmd->jiffies_at_alloc = jiffies;
- cmd->request = rq;
-
- cmd->cmnd = rq->cmd;
-
- rq->special = cmd;
- rq->cmd_type = REQ_TYPE_SPECIAL;
- rq->cmd_flags |= REQ_TYPE_BLOCK_PC;
- rq->end_io_data = tcmd;
-
- tcmd->rq = rq;
-
- return cmd;
-
-release_rq:
- blk_put_request(rq);
-free_tcmd:
- kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
-put_dev:
- put_device(&shost->shost_gendev);
- return NULL;
-
-}
-EXPORT_SYMBOL_GPL(scsi_host_get_command);
-
-/*
- * Function: scsi_host_put_command()
- *
- * Purpose: Free a scsi command block
- *
- * Arguments: shost - scsi host
- * cmd - command block to free
- *
- * Returns: Nothing.
- *
- * Notes: The command must not belong to any lists.
- */
-void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-{
- struct request_queue *q = shost->uspace_req_q;
- struct request *rq = cmd->request;
- struct scsi_tgt_cmd *tcmd = rq->end_io_data;
- unsigned long flags;
-
- kmem_cache_free(scsi_tgt_cmd_cache, tcmd);
-
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_put_request(q, rq);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- __scsi_put_command(shost, cmd);
- put_device(&shost->shost_gendev);
-}
-EXPORT_SYMBOL_GPL(scsi_host_put_command);
-
-static void cmd_hashlist_del(struct scsi_cmnd *cmd)
-{
- struct request_queue *q = cmd->request->q;
- struct scsi_tgt_queuedata *qdata = q->queuedata;
- unsigned long flags;
- struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
-
- spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
- list_del(&tcmd->hash_list);
- spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
-}
-
-static void scsi_unmap_user_pages(struct scsi_tgt_cmd *tcmd)
-{
- blk_rq_unmap_user(tcmd->bio);
-}
-
-static void scsi_tgt_cmd_destroy(struct work_struct *work)
-{
- struct scsi_tgt_cmd *tcmd =
- container_of(work, struct scsi_tgt_cmd, work);
- struct scsi_cmnd *cmd = tcmd->rq->special;
-
- dprintk("cmd %p %d %u\n", cmd, cmd->sc_data_direction,
- rq_data_dir(cmd->request));
- scsi_unmap_user_pages(tcmd);
- tcmd->rq->bio = NULL;
- scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
-}
-
-static void init_scsi_tgt_cmd(struct request *rq, struct scsi_tgt_cmd *tcmd,
- u64 itn_id, u64 tag)
-{
- struct scsi_tgt_queuedata *qdata = rq->q->queuedata;
- unsigned long flags;
- struct list_head *head;
-
- tcmd->itn_id = itn_id;
- tcmd->tag = tag;
- tcmd->bio = NULL;
- INIT_WORK(&tcmd->work, scsi_tgt_cmd_destroy);
- spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
- head = &qdata->cmd_hash[cmd_hashfn(tag)];
- list_add(&tcmd->hash_list, head);
- spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
-}
-
-/*
- * scsi_tgt_alloc_queue - setup queue used for message passing
- * shost: scsi host
- *
- * This should be called by the LLD after host allocation.
- * And will be released when the host is released.
- */
-int scsi_tgt_alloc_queue(struct Scsi_Host *shost)
-{
- struct scsi_tgt_queuedata *queuedata;
- struct request_queue *q;
- int err, i;
-
- /*
- * Do we need to send a netlink event or should uspace
- * just respond to the hotplug event?
- */
- q = __scsi_alloc_queue(shost, NULL);
- if (!q)
- return -ENOMEM;
-
- queuedata = kzalloc(sizeof(*queuedata), GFP_KERNEL);
- if (!queuedata) {
- err = -ENOMEM;
- goto cleanup_queue;
- }
- queuedata->shost = shost;
- q->queuedata = queuedata;
-
- /*
- * this is a silly hack. We should probably just queue as many
- * command as is recvd to userspace. uspace can then make
- * sure we do not overload the HBA
- */
- q->nr_requests = shost->can_queue;
- /*
- * We currently only support software LLDs so this does
- * not matter for now. Do we need this for the cards we support?
- * If so we should make it a host template value.
- */
- blk_queue_dma_alignment(q, 0);
- shost->uspace_req_q = q;
-
- for (i = 0; i < ARRAY_SIZE(queuedata->cmd_hash); i++)
- INIT_LIST_HEAD(&queuedata->cmd_hash[i]);
- spin_lock_init(&queuedata->cmd_hash_lock);
-
- return 0;
-
-cleanup_queue:
- blk_cleanup_queue(q);
- return err;
-}
-EXPORT_SYMBOL_GPL(scsi_tgt_alloc_queue);
-
-void scsi_tgt_free_queue(struct Scsi_Host *shost)
-{
- int i;
- unsigned long flags;
- struct request_queue *q = shost->uspace_req_q;
- struct scsi_cmnd *cmd;
- struct scsi_tgt_queuedata *qdata = q->queuedata;
- struct scsi_tgt_cmd *tcmd, *n;
- LIST_HEAD(cmds);
-
- spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
-
- for (i = 0; i < ARRAY_SIZE(qdata->cmd_hash); i++) {
- list_for_each_entry_safe(tcmd, n, &qdata->cmd_hash[i],
- hash_list)
- list_move(&tcmd->hash_list, &cmds);
- }
-
- spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
-
- while (!list_empty(&cmds)) {
- tcmd = list_entry(cmds.next, struct scsi_tgt_cmd, hash_list);
- list_del(&tcmd->hash_list);
- cmd = tcmd->rq->special;
-
- shost->hostt->eh_abort_handler(cmd);
- scsi_tgt_cmd_destroy(&tcmd->work);
- }
-}
-EXPORT_SYMBOL_GPL(scsi_tgt_free_queue);
-
-struct Scsi_Host *scsi_tgt_cmd_to_host(struct scsi_cmnd *cmd)
-{
- struct scsi_tgt_queuedata *queue = cmd->request->q->queuedata;
- return queue->shost;
-}
-EXPORT_SYMBOL_GPL(scsi_tgt_cmd_to_host);
-
-/*
- * scsi_tgt_queue_command - queue command for userspace processing
- * @cmd: scsi command
- * @scsilun: scsi lun
- * @tag: unique value to identify this command for tmf
- */
-int scsi_tgt_queue_command(struct scsi_cmnd *cmd, u64 itn_id,
- struct scsi_lun *scsilun, u64 tag)
-{
- struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
- int err;
-
- init_scsi_tgt_cmd(cmd->request, tcmd, itn_id, tag);
- err = scsi_tgt_uspace_send_cmd(cmd, itn_id, scsilun, tag);
- if (err)
- cmd_hashlist_del(cmd);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(scsi_tgt_queue_command);
-
-/*
- * This is run from a interrupt handler normally and the unmap
- * needs process context so we must queue
- */
-static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
-{
- struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;
-
- dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request));
-
- scsi_tgt_uspace_send_status(cmd, tcmd->itn_id, tcmd->tag);
-
- scsi_release_buffers(cmd);
-
- queue_work(scsi_tgtd, &tcmd->work);
-}
-
-static int scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
-{
- struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
- int err;
-
- dprintk("cmd %p %u\n", cmd, rq_data_dir(cmd->request));
-
- err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
- switch (err) {
- case SCSI_MLQUEUE_HOST_BUSY:
- case SCSI_MLQUEUE_DEVICE_BUSY:
- return -EAGAIN;
- }
- return 0;
-}
-
-/* TODO: test this crap and replace bio_map_user with new interface maybe */
-static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
- unsigned long uaddr, unsigned int len, int rw)
-{
- struct request_queue *q = cmd->request->q;
- struct request *rq = cmd->request;
- int err;
-
- dprintk("%lx %u\n", uaddr, len);
- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
- if (err) {
- /*
- * TODO: need to fixup sg_tablesize, max_segment_size,
- * max_sectors, etc for modern HW and software drivers
- * where this value is bogus.
- *
- * TODO2: we can alloc a reserve buffer of max size
- * we can handle and do the slow copy path for really large
- * IO.
- */
- eprintk("Could not handle request of size %u.\n", len);
- return err;
- }
-
- tcmd->bio = rq->bio;
- err = scsi_init_io(cmd, GFP_KERNEL);
- if (err) {
- scsi_release_buffers(cmd);
- goto unmap_rq;
- }
- /*
- * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the
- * length for us.
- */
- cmd->sdb.length = blk_rq_bytes(rq);
-
- return 0;
-
-unmap_rq:
- scsi_unmap_user_pages(tcmd);
- return err;
-}
-
-static int scsi_tgt_copy_sense(struct scsi_cmnd *cmd, unsigned long uaddr,
- unsigned len)
-{
- char __user *p = (char __user *) uaddr;
-
- if (copy_from_user(cmd->sense_buffer, p,
- min_t(unsigned, SCSI_SENSE_BUFFERSIZE, len))) {
- printk(KERN_ERR "Could not copy the sense buffer\n");
- return -EIO;
- }
- return 0;
-}
-
-static int scsi_tgt_abort_cmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
-{
- struct scsi_tgt_cmd *tcmd;
- int err;
-
- err = shost->hostt->eh_abort_handler(cmd);
- if (err)
- eprintk("fail to abort %p\n", cmd);
-
- tcmd = cmd->request->end_io_data;
- scsi_tgt_cmd_destroy(&tcmd->work);
- return err;
-}
-
-static struct request *tgt_cmd_hash_lookup(struct request_queue *q, u64 tag)
-{
- struct scsi_tgt_queuedata *qdata = q->queuedata;
- struct request *rq = NULL;
- struct list_head *head;
- struct scsi_tgt_cmd *tcmd;
- unsigned long flags;
-
- head = &qdata->cmd_hash[cmd_hashfn(tag)];
- spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
- list_for_each_entry(tcmd, head, hash_list) {
- if (tcmd->tag == tag) {
- rq = tcmd->rq;
- list_del(&tcmd->hash_list);
- break;
- }
- }
- spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
-
- return rq;
-}
-
-int scsi_tgt_kspace_exec(int host_no, u64 itn_id, int result, u64 tag,
- unsigned long uaddr, u32 len, unsigned long sense_uaddr,
- u32 sense_len, u8 rw)
-{
- struct Scsi_Host *shost;
- struct scsi_cmnd *cmd;
- struct request *rq;
- struct scsi_tgt_cmd *tcmd;
- int err = 0;
-
- dprintk("%d %llu %d %u %lx %u\n", host_no, (unsigned long long) tag,
- result, len, uaddr, rw);
-
- /* TODO: replace with a O(1) alg */
- shost = scsi_host_lookup(host_no);
- if (!shost) {
- printk(KERN_ERR "Could not find host no %d\n", host_no);
- return -EINVAL;
- }
-
- if (!shost->uspace_req_q) {
- printk(KERN_ERR "Not target scsi host %d\n", host_no);
- goto done;
- }
-
- rq = tgt_cmd_hash_lookup(shost->uspace_req_q, tag);
- if (!rq) {
- printk(KERN_ERR "Could not find tag %llu\n",
- (unsigned long long) tag);
- err = -EINVAL;
- goto done;
- }
- cmd = rq->special;
-
- dprintk("cmd %p scb %x result %d len %d bufflen %u %u %x\n",
- cmd, cmd->cmnd[0], result, len, scsi_bufflen(cmd),
- rq_data_dir(rq), cmd->cmnd[0]);
-
- if (result == TASK_ABORTED) {
- scsi_tgt_abort_cmd(shost, cmd);
- goto done;
- }
- /*
- * store the userspace values here, the working values are
- * in the request_* values
- */
- tcmd = cmd->request->end_io_data;
- cmd->result = result;
-
- if (cmd->result == SAM_STAT_CHECK_CONDITION)
- scsi_tgt_copy_sense(cmd, sense_uaddr, sense_len);
-
- if (len) {
- err = scsi_map_user_pages(rq->end_io_data, cmd, uaddr, len, rw);
- if (err) {
- /*
- * user-space daemon bugs or OOM
- * TODO: we can do better for OOM.
- */
- struct scsi_tgt_queuedata *qdata;
- struct list_head *head;
- unsigned long flags;
-
- eprintk("cmd %p ret %d uaddr %lx len %d rw %d\n",
- cmd, err, uaddr, len, rw);
-
- qdata = shost->uspace_req_q->queuedata;
- head = &qdata->cmd_hash[cmd_hashfn(tcmd->tag)];
-
- spin_lock_irqsave(&qdata->cmd_hash_lock, flags);
- list_add(&tcmd->hash_list, head);
- spin_unlock_irqrestore(&qdata->cmd_hash_lock, flags);
-
- goto done;
- }
- }
- err = scsi_tgt_transfer_response(cmd);
-done:
- scsi_host_put(shost);
- return err;
-}
-
-int scsi_tgt_tsk_mgmt_request(struct Scsi_Host *shost, u64 itn_id,
- int function, u64 tag, struct scsi_lun *scsilun,
- void *data)
-{
- int err;
-
- /* TODO: need to retry if this fails. */
- err = scsi_tgt_uspace_send_tsk_mgmt(shost->host_no, itn_id,
- function, tag, scsilun, data);
- if (err < 0)
- eprintk("The task management request lost!\n");
- return err;
-}
-EXPORT_SYMBOL_GPL(scsi_tgt_tsk_mgmt_request);
-
-int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 itn_id, u64 mid, int result)
-{
- struct Scsi_Host *shost;
- int err = -EINVAL;
-
- dprintk("%d %d %llx\n", host_no, result, (unsigned long long) mid);
-
- shost = scsi_host_lookup(host_no);
- if (!shost) {
- printk(KERN_ERR "Could not find host no %d\n", host_no);
- return err;
- }
-
- if (!shost->uspace_req_q) {
- printk(KERN_ERR "Not target scsi host %d\n", host_no);
- goto done;
- }
-
- err = shost->transportt->tsk_mgmt_response(shost, itn_id, mid, result);
-done:
- scsi_host_put(shost);
- return err;
-}
-
-int scsi_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id,
- char *initiator)
-{
- int err;
-
- /* TODO: need to retry if this fails. */
- err = scsi_tgt_uspace_send_it_nexus_request(shost->host_no, itn_id, 0,
- initiator);
- if (err < 0)
- eprintk("The i_t_neuxs request lost, %d %llx!\n",
- shost->host_no, (unsigned long long)itn_id);
- return err;
-}
-EXPORT_SYMBOL_GPL(scsi_tgt_it_nexus_create);
-
-int scsi_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id)
-{
- int err;
-
- /* TODO: need to retry if this fails. */
- err = scsi_tgt_uspace_send_it_nexus_request(shost->host_no,
- itn_id, 1, NULL);
- if (err < 0)
- eprintk("The i_t_neuxs request lost, %d %llx!\n",
- shost->host_no, (unsigned long long)itn_id);
- return err;
-}
-EXPORT_SYMBOL_GPL(scsi_tgt_it_nexus_destroy);
-
-int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 itn_id, int result)
-{
- struct Scsi_Host *shost;
- int err = -EINVAL;
-
- dprintk("%d %d%llx\n", host_no, result, (unsigned long long)itn_id);
-
- shost = scsi_host_lookup(host_no);
- if (!shost) {
- printk(KERN_ERR "Could not find host no %d\n", host_no);
- return err;
- }
-
- if (!shost->uspace_req_q) {
- printk(KERN_ERR "Not target scsi host %d\n", host_no);
- goto done;
- }
-
- err = shost->transportt->it_nexus_response(shost, itn_id, result);
-done:
- scsi_host_put(shost);
- return err;
-}
-
-static int __init scsi_tgt_init(void)
-{
- int err;
-
- scsi_tgt_cmd_cache = KMEM_CACHE(scsi_tgt_cmd, 0);
- if (!scsi_tgt_cmd_cache)
- return -ENOMEM;
-
- scsi_tgtd = alloc_workqueue("scsi_tgtd", 0, 1);
- if (!scsi_tgtd) {
- err = -ENOMEM;
- goto free_kmemcache;
- }
-
- err = scsi_tgt_if_init();
- if (err)
- goto destroy_wq;
-
- return 0;
-
-destroy_wq:
- destroy_workqueue(scsi_tgtd);
-free_kmemcache:
- kmem_cache_destroy(scsi_tgt_cmd_cache);
- return err;
-}
-
-static void __exit scsi_tgt_exit(void)
-{
- destroy_workqueue(scsi_tgtd);
- scsi_tgt_if_exit();
- kmem_cache_destroy(scsi_tgt_cmd_cache);
-}
-
-module_init(scsi_tgt_init);
-module_exit(scsi_tgt_exit);
-
-MODULE_DESCRIPTION("SCSI target core");
-MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/scsi_tgt_priv.h b/drivers/scsi/scsi_tgt_priv.h
deleted file mode 100644
index fe4c62177f78..000000000000
--- a/drivers/scsi/scsi_tgt_priv.h
+++ /dev/null
@@ -1,32 +0,0 @@
-struct scsi_cmnd;
-struct scsi_lun;
-struct Scsi_Host;
-struct task_struct;
-
-/* tmp - will replace with SCSI logging stuff */
-#define eprintk(fmt, args...) \
-do { \
- printk("%s(%d) " fmt, __func__, __LINE__, ##args); \
-} while (0)
-
-#define dprintk(fmt, args...)
-/* #define dprintk eprintk */
-
-extern void scsi_tgt_if_exit(void);
-extern int scsi_tgt_if_init(void);
-
-extern int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, u64 it_nexus_id,
- struct scsi_lun *lun, u64 tag);
-extern int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 it_nexus_id,
- u64 tag);
-extern int scsi_tgt_kspace_exec(int host_no, u64 it_nexus_id, int result, u64 tag,
- unsigned long uaddr, u32 len,
- unsigned long sense_uaddr, u32 sense_len, u8 rw);
-extern int scsi_tgt_uspace_send_tsk_mgmt(int host_no, u64 it_nexus_id,
- int function, u64 tag,
- struct scsi_lun *scsilun, void *data);
-extern int scsi_tgt_kspace_tsk_mgmt(int host_no, u64 it_nexus_id,
- u64 mid, int result);
-extern int scsi_tgt_uspace_send_it_nexus_request(int host_no, u64 it_nexus_id,
- int function, char *initiator);
-extern int scsi_tgt_kspace_it_nexus_rsp(int host_no, u64 it_nexus_id, int result);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 521f5838594b..5d6f348eb3d8 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -39,7 +39,6 @@
#include <scsi/scsi_netlink_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "scsi_priv.h"
-#include "scsi_transport_fc_internal.h"
static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
static void fc_vport_sched_delete(struct work_struct *work);
@@ -262,6 +261,10 @@ static const struct {
{ FC_PORTSPEED_8GBIT, "8 Gbit" },
{ FC_PORTSPEED_16GBIT, "16 Gbit" },
{ FC_PORTSPEED_32GBIT, "32 Gbit" },
+ { FC_PORTSPEED_20GBIT, "20 Gbit" },
+ { FC_PORTSPEED_40GBIT, "40 Gbit" },
+ { FC_PORTSPEED_50GBIT, "50 Gbit" },
+ { FC_PORTSPEED_100GBIT, "100 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
@@ -2089,7 +2092,7 @@ fc_timed_out(struct scsi_cmnd *scmd)
* on the rport.
*/
static void
-fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
+fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
{
struct fc_rport *rport;
unsigned long flags;
@@ -2121,7 +2124,7 @@ fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
* object as the parent.
*/
static int
-fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
+fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
{
uint chlo, chhi;
uint tgtlo, tgthi;
@@ -3008,10 +3011,6 @@ fc_remote_port_delete(struct fc_rport *rport)
spin_unlock_irqrestore(shost->host_lock, flags);
- if (rport->roles & FC_PORT_ROLE_FCP_INITIATOR &&
- shost->active_mode & MODE_TARGET)
- fc_tgt_it_nexus_destroy(shost, (unsigned long)rport);
-
scsi_target_block(&rport->dev);
/* see if we need to kill io faster than waiting for device loss */
@@ -3052,7 +3051,6 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
unsigned long flags;
int create = 0;
- int ret;
spin_lock_irqsave(shost->host_lock, flags);
if (roles & FC_PORT_ROLE_FCP_TARGET) {
@@ -3061,12 +3059,6 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
create = 1;
} else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
create = 1;
- } else if (shost->active_mode & MODE_TARGET) {
- ret = fc_tgt_it_nexus_create(shost, (unsigned long)rport,
- (char *)&rport->node_name);
- if (ret)
- printk(KERN_ERR "FC Remore Port tgt nexus failed %d\n",
- ret);
}
rport->roles = roles;
diff --git a/drivers/scsi/scsi_transport_fc_internal.h b/drivers/scsi/scsi_transport_fc_internal.h
deleted file mode 100644
index e7bfbe751c1f..000000000000
--- a/drivers/scsi/scsi_transport_fc_internal.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <scsi/scsi_tgt.h>
-
-#ifdef CONFIG_SCSI_FC_TGT_ATTRS
-static inline int fc_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id,
- char *initiator)
-{
- return scsi_tgt_it_nexus_create(shost, itn_id, initiator);
-}
-
-static inline int fc_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id)
-{
- return scsi_tgt_it_nexus_destroy(shost, itn_id);
-}
-#else
-static inline int fc_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id,
- char *initiator)
-{
- return 0;
-}
-
-static inline int fc_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0102a2d70dd8..b481e62a12cc 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1780,7 +1780,7 @@ EXPORT_SYMBOL_GPL(iscsi_scan_finished);
struct iscsi_scan_data {
unsigned int channel;
unsigned int id;
- unsigned int lun;
+ u64 lun;
};
static int iscsi_user_scan_session(struct device *dev, void *data)
@@ -1827,7 +1827,7 @@ user_scan_exit:
}
static int iscsi_user_scan(struct Scsi_Host *shost, uint channel,
- uint id, uint lun)
+ uint id, u64 lun)
{
struct iscsi_scan_data scan_data;
@@ -3059,7 +3059,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
evchap->u.get_chap.host_no = ev->u.get_chap.host_no;
evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx;
evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries;
- buf = (char *) ((char *)evchap + sizeof(*evchap));
+ buf = (char *)evchap + sizeof(*evchap);
memset(buf, 0, chap_buf_size);
err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
@@ -3463,7 +3463,7 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
evhost_stats->type = nlh->nlmsg_type;
evhost_stats->u.get_host_stats.host_no =
ev->u.get_host_stats.host_no;
- buf = (char *)((char *)evhost_stats + sizeof(*evhost_stats));
+ buf = (char *)evhost_stats + sizeof(*evhost_stats);
memset(buf, 0, host_stats_size);
err = transport->get_host_stats(shost, buf, host_stats_size);
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index c341f855fadc..9a058194b9bd 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1705,7 +1705,7 @@ EXPORT_SYMBOL(scsi_is_sas_rphy);
*/
static int sas_user_scan(struct Scsi_Host *shost, uint channel,
- uint id, uint lun)
+ uint id, u64 lun)
{
struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
struct sas_rphy *rphy;
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 13e898332e45..43fea2219f83 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -33,7 +33,6 @@
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_srp.h>
#include "scsi_priv.h"
-#include "scsi_transport_srp_internal.h"
struct srp_host_attrs {
atomic_t next_port_id;
@@ -746,18 +745,6 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
return ERR_PTR(ret);
}
- if (shost->active_mode & MODE_TARGET &&
- ids->roles == SRP_RPORT_ROLE_INITIATOR) {
- ret = srp_tgt_it_nexus_create(shost, (unsigned long)rport,
- rport->port_id);
- if (ret) {
- device_del(&rport->dev);
- transport_destroy_device(&rport->dev);
- put_device(&rport->dev);
- return ERR_PTR(ret);
- }
- }
-
transport_add_device(&rport->dev);
transport_configure_device(&rport->dev);
@@ -774,11 +761,6 @@ EXPORT_SYMBOL_GPL(srp_rport_add);
void srp_rport_del(struct srp_rport *rport)
{
struct device *dev = &rport->dev;
- struct Scsi_Host *shost = dev_to_shost(dev->parent);
-
- if (shost->active_mode & MODE_TARGET &&
- rport->roles == SRP_RPORT_ROLE_INITIATOR)
- srp_tgt_it_nexus_destroy(shost, (unsigned long)rport);
transport_remove_device(dev);
device_del(dev);
diff --git a/drivers/scsi/scsi_transport_srp_internal.h b/drivers/scsi/scsi_transport_srp_internal.h
deleted file mode 100644
index 8a79747f9f3d..000000000000
--- a/drivers/scsi/scsi_transport_srp_internal.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <scsi/scsi_tgt.h>
-
-#ifdef CONFIG_SCSI_SRP_TGT_ATTRS
-static inline int srp_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id,
- char *initiator)
-{
- return scsi_tgt_it_nexus_create(shost, itn_id, initiator);
-}
-
-static inline int srp_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id)
-{
- return scsi_tgt_it_nexus_destroy(shost, itn_id);
-}
-
-#else
-static inline int srp_tgt_it_nexus_create(struct Scsi_Host *shost, u64 itn_id,
- char *initiator)
-{
- return 0;
-}
-static inline int srp_tgt_it_nexus_destroy(struct Scsi_Host *shost, u64 itn_id)
-{
- return 0;
-}
-#endif
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6825eda1114a..2c2041ca4b70 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -134,6 +134,19 @@ static const char *sd_cache_types[] = {
"write back, no read (daft)"
};
+static void sd_set_flush_flag(struct scsi_disk *sdkp)
+{
+ unsigned flush = 0;
+
+ if (sdkp->WCE) {
+ flush |= REQ_FLUSH;
+ if (sdkp->DPOFUA)
+ flush |= REQ_FUA;
+ }
+
+ blk_queue_flush(sdkp->disk->queue, flush);
+}
+
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -177,6 +190,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
if (sdkp->cache_override) {
sdkp->WCE = wce;
sdkp->RCD = rcd;
+ sd_set_flush_flag(sdkp);
return count;
}
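sd_set_flush_flag() factors the WCE/DPOFUA to flush-flag mapping out of sd_revalidate_disk() so that cache_type_store() above can reuse it when the cache mode is changed from sysfs. A stand-alone sketch of the same derivation; the REQ_FLUSH/REQ_FUA values are placeholders, not the kernel's:

#include <stdio.h>

#define REQ_FLUSH (1U << 0)	/* placeholder bit values for the sketch */
#define REQ_FUA   (1U << 1)

/* write-cache enabled (WCE) asks for a post-write flush; DPOFUA additionally
 * allows forced-unit-access writes, mirroring sd_set_flush_flag() */
static unsigned int flush_flags(int wce, int dpofua)
{
	unsigned int flush = 0;

	if (wce) {
		flush |= REQ_FLUSH;
		if (dpofua)
			flush |= REQ_FUA;
	}
	return flush;
}

int main(void)
{
	printf("WCE=1 DPOFUA=1 -> 0x%x\n", flush_flags(1, 1));	/* FLUSH|FUA */
	printf("WCE=1 DPOFUA=0 -> 0x%x\n", flush_flags(1, 0));	/* FLUSH     */
	printf("WCE=0 DPOFUA=1 -> 0x%x\n", flush_flags(0, 1));	/* 0         */
	return 0;
}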
@@ -677,8 +691,10 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
* Will issue either UNMAP or WRITE SAME(16) depending on preference
* indicated by target device.
**/
-static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
{
+ struct request *rq = cmd->request;
+ struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
sector_t sector = blk_rq_pos(rq);
unsigned int nr_sectors = blk_rq_sectors(rq);
@@ -690,9 +706,6 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
sector >>= ilog2(sdp->sector_size) - 9;
nr_sectors >>= ilog2(sdp->sector_size) - 9;
- rq->timeout = SD_TIMEOUT;
-
- memset(rq->cmd, 0, rq->cmd_len);
page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
if (!page)
@@ -702,9 +715,9 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
case SD_LBP_UNMAP:
buf = page_address(page);
- rq->cmd_len = 10;
- rq->cmd[0] = UNMAP;
- rq->cmd[8] = 24;
+ cmd->cmd_len = 10;
+ cmd->cmnd[0] = UNMAP;
+ cmd->cmnd[8] = 24;
put_unaligned_be16(6 + 16, &buf[0]);
put_unaligned_be16(16, &buf[2]);
@@ -715,23 +728,23 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
break;
case SD_LBP_WS16:
- rq->cmd_len = 16;
- rq->cmd[0] = WRITE_SAME_16;
- rq->cmd[1] = 0x8; /* UNMAP */
- put_unaligned_be64(sector, &rq->cmd[2]);
- put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+ cmd->cmd_len = 16;
+ cmd->cmnd[0] = WRITE_SAME_16;
+ cmd->cmnd[1] = 0x8; /* UNMAP */
+ put_unaligned_be64(sector, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
len = sdkp->device->sector_size;
break;
case SD_LBP_WS10:
case SD_LBP_ZERO:
- rq->cmd_len = 10;
- rq->cmd[0] = WRITE_SAME;
+ cmd->cmd_len = 10;
+ cmd->cmnd[0] = WRITE_SAME;
if (sdkp->provisioning_mode == SD_LBP_WS10)
- rq->cmd[1] = 0x8; /* UNMAP */
- put_unaligned_be32(sector, &rq->cmd[2]);
- put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+ cmd->cmnd[1] = 0x8; /* UNMAP */
+ put_unaligned_be32(sector, &cmd->cmnd[2]);
+ put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
len = sdkp->device->sector_size;
break;
@@ -742,8 +755,21 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
}
rq->completion_data = page;
+ rq->timeout = SD_TIMEOUT;
+
+ cmd->transfersize = len;
+ cmd->allowed = SD_MAX_RETRIES;
+
+ /*
+ * Initially __data_len is set to the amount of data that needs to be
+ * transferred to the target. This amount depends on whether WRITE SAME
+ * or UNMAP is being used. After the scatterlist has been mapped by
+ * scsi_init_io() we set __data_len to the size of the area to be
+ * discarded on disk. This allows us to report completion on the full
+ * amount of blocks described by the request.
+ */
blk_add_request_payload(rq, page, len);
- ret = scsi_setup_blk_pc_cmnd(sdp, rq);
+ ret = scsi_init_io(cmd, GFP_ATOMIC);
rq->__data_len = nr_bytes;
out:
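For the SD_LBP_UNMAP branch above, the payload page carries an UNMAP parameter list: an 8-byte header followed by a single 16-byte block descriptor, which is exactly what the put_unaligned_be*() calls fill in. A user-space sketch that builds and dumps that 24-byte list; the LBA and block count are made-up examples:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be16(uint16_t v, unsigned char *p) { p[0] = v >> 8; p[1] = (unsigned char)v; }
static void put_be32(uint32_t v, unsigned char *p) { p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = (unsigned char)v; }
static void put_be64(uint64_t v, unsigned char *p) { put_be32((uint32_t)(v >> 32), p); put_be32((uint32_t)v, p + 4); }

int main(void)
{
	unsigned char buf[24];
	uint64_t lba = 0x1000;		/* example first LBA to unmap */
	uint32_t nr_blocks = 2048;	/* example length in logical blocks */
	int i;

	memset(buf, 0, sizeof(buf));
	put_be16(6 + 16, &buf[0]);	/* UNMAP data length */
	put_be16(16, &buf[2]);		/* block descriptor data length */
	put_be64(lba, &buf[8]);		/* descriptor: first LBA */
	put_be32(nr_blocks, &buf[16]);	/* descriptor: number of blocks */

	for (i = 0; i < (int)sizeof(buf); i++)
		printf("%02x%c", buf[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}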
@@ -785,14 +811,15 @@ out:
/**
* sd_setup_write_same_cmnd - write the same data to multiple blocks
- * @sdp: scsi device to operate one
- * @rq: Request to prepare
+ * @cmd: command to prepare
*
* Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
* preference indicated by target device.
**/
-static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
{
+ struct request *rq = cmd->request;
+ struct scsi_device *sdp = cmd->device;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
struct bio *bio = rq->bio;
sector_t sector = blk_rq_pos(rq);
@@ -808,53 +835,56 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
sector >>= ilog2(sdp->sector_size) - 9;
nr_sectors >>= ilog2(sdp->sector_size) - 9;
- rq->__data_len = sdp->sector_size;
rq->timeout = SD_WRITE_SAME_TIMEOUT;
- memset(rq->cmd, 0, rq->cmd_len);
if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
- rq->cmd_len = 16;
- rq->cmd[0] = WRITE_SAME_16;
- put_unaligned_be64(sector, &rq->cmd[2]);
- put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+ cmd->cmd_len = 16;
+ cmd->cmnd[0] = WRITE_SAME_16;
+ put_unaligned_be64(sector, &cmd->cmnd[2]);
+ put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
} else {
- rq->cmd_len = 10;
- rq->cmd[0] = WRITE_SAME;
- put_unaligned_be32(sector, &rq->cmd[2]);
- put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+ cmd->cmd_len = 10;
+ cmd->cmnd[0] = WRITE_SAME;
+ put_unaligned_be32(sector, &cmd->cmnd[2]);
+ put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
}
- ret = scsi_setup_blk_pc_cmnd(sdp, rq);
- rq->__data_len = nr_bytes;
+ cmd->transfersize = sdp->sector_size;
+ cmd->allowed = SD_MAX_RETRIES;
+ /*
+ * For WRITE_SAME the data transferred in the DATA IN buffer is
+ * different from the amount of data actually written to the target.
+ *
+ * We set up __data_len to the amount of data transferred from the
+ * DATA IN buffer so that blk_rq_map_sg sets up the proper S/G list
+ * to transfer a single sector of data first, but then reset it to
+ * the amount of data to be written right after so that the I/O path
+ * knows how much to actually write.
+ */
+ rq->__data_len = sdp->sector_size;
+ ret = scsi_init_io(cmd, GFP_ATOMIC);
+ rq->__data_len = nr_bytes;
return ret;
}
-static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
- rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER;
- rq->retries = SD_MAX_RETRIES;
- rq->cmd[0] = SYNCHRONIZE_CACHE;
- rq->cmd_len = 10;
-
- return scsi_setup_blk_pc_cmnd(sdp, rq);
-}
+ struct request *rq = cmd->request;
-static void sd_uninit_command(struct scsi_cmnd *SCpnt)
-{
- struct request *rq = SCpnt->request;
+ /* flush requests don't perform I/O, zero the S/G table */
+ memset(&cmd->sdb, 0, sizeof(cmd->sdb));
- if (rq->cmd_flags & REQ_DISCARD)
- __free_page(rq->completion_data);
+ cmd->cmnd[0] = SYNCHRONIZE_CACHE;
+ cmd->cmd_len = 10;
+ cmd->transfersize = 0;
+ cmd->allowed = SD_MAX_RETRIES;
- if (SCpnt->cmnd != rq->cmd) {
- mempool_free(SCpnt->cmnd, sd_cdb_pool);
- SCpnt->cmnd = NULL;
- SCpnt->cmd_len = 0;
- }
+ rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
+ return BLKPREP_OK;
}
-static int sd_init_command(struct scsi_cmnd *SCpnt)
+static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
struct scsi_device *sdp = SCpnt->device;
@@ -866,21 +896,7 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
int ret, host_dif;
unsigned char protect;
- /*
- * Discard request come in as REQ_TYPE_FS but we turn them into
- * block PC requests to make life easier.
- */
- if (rq->cmd_flags & REQ_DISCARD) {
- ret = sd_setup_discard_cmnd(sdp, rq);
- goto out;
- } else if (rq->cmd_flags & REQ_WRITE_SAME) {
- ret = sd_setup_write_same_cmnd(sdp, rq);
- goto out;
- } else if (rq->cmd_flags & REQ_FLUSH) {
- ret = scsi_setup_flush_cmnd(sdp, rq);
- goto out;
- }
- ret = scsi_setup_fs_cmnd(sdp, rq);
+ ret = scsi_init_io(SCpnt, GFP_ATOMIC);
if (ret != BLKPREP_OK)
goto out;
SCpnt = rq->special;
@@ -976,18 +992,13 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
}
}
if (rq_data_dir(rq) == WRITE) {
- if (!sdp->writeable) {
- goto out;
- }
SCpnt->cmnd[0] = WRITE_6;
- SCpnt->sc_data_direction = DMA_TO_DEVICE;
if (blk_integrity_rq(rq))
sd_dif_prepare(rq, block, sdp->sector_size);
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_6;
- SCpnt->sc_data_direction = DMA_FROM_DEVICE;
} else {
scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
goto out;
@@ -1042,7 +1053,7 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
- } else if (sdp->use_16_for_rw) {
+ } else if (sdp->use_16_for_rw || (this_count > 0xffff)) {
SCpnt->cmnd[0] += READ_16 - READ_6;
SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
@@ -1061,9 +1072,6 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
} else if ((this_count > 0xff) || (block > 0x1fffff) ||
scsi_device_protection(SCpnt->device) ||
SCpnt->device->use_10_for_rw) {
- if (this_count > 0xffff)
- this_count = 0xffff;
-
SCpnt->cmnd[0] += READ_10 - READ_6;
SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
@@ -1116,6 +1124,34 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
return ret;
}
+static int sd_init_command(struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd->request;
+
+ if (rq->cmd_flags & REQ_DISCARD)
+ return sd_setup_discard_cmnd(cmd);
+ else if (rq->cmd_flags & REQ_WRITE_SAME)
+ return sd_setup_write_same_cmnd(cmd);
+ else if (rq->cmd_flags & REQ_FLUSH)
+ return sd_setup_flush_cmnd(cmd);
+ else
+ return sd_setup_read_write_cmnd(cmd);
+}
+
+static void sd_uninit_command(struct scsi_cmnd *SCpnt)
+{
+ struct request *rq = SCpnt->request;
+
+ if (rq->cmd_flags & REQ_DISCARD)
+ __free_page(rq->completion_data);
+
+ if (SCpnt->cmnd != rq->cmd) {
+ mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ SCpnt->cmnd = NULL;
+ SCpnt->cmd_len = 0;
+ }
+}
+
/**
* sd_open - open a scsi disk device
* @inode: only i_rdev member may be used
@@ -2225,7 +2261,11 @@ got_data:
}
}
- sdp->use_16_for_rw = (sdkp->capacity > 0xffffffff);
+ if (sdkp->capacity > 0xffffffff) {
+ sdp->use_16_for_rw = 1;
+ sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
+ } else
+ sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
/* Rescale capacity to 512-byte units */
if (sector_size == 4096)
@@ -2540,6 +2580,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
{
unsigned int sector_sz = sdkp->device->sector_size;
const int vpd_len = 64;
+ u32 max_xfer_length;
unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
if (!buffer ||
@@ -2547,6 +2588,10 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
goto out;
+ max_xfer_length = get_unaligned_be32(&buffer[8]);
+ if (max_xfer_length)
+ sdkp->max_xfer_blocks = max_xfer_length;
+
blk_queue_io_min(sdkp->disk->queue,
get_unaligned_be16(&buffer[6]) * sector_sz);
blk_queue_io_opt(sdkp->disk->queue,
@@ -2681,6 +2726,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
static int sd_try_extended_inquiry(struct scsi_device *sdp)
{
+ /* Attempt VPD inquiry if the device blacklist explicitly calls
+ * for it.
+ */
+ if (sdp->try_vpd_pages)
+ return 1;
/*
* Although VPD inquiries can go to SCSI-2 type devices,
* some USB ones crash on receiving them, and the pages
@@ -2701,7 +2751,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
unsigned char *buffer;
- unsigned flush = 0;
+ unsigned int max_xfer;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
"sd_revalidate_disk\n"));
@@ -2747,14 +2797,12 @@ static int sd_revalidate_disk(struct gendisk *disk)
* We now have all cache related info, determine how we deal
* with flush requests.
*/
- if (sdkp->WCE) {
- flush |= REQ_FLUSH;
- if (sdkp->DPOFUA)
- flush |= REQ_FUA;
- }
-
- blk_queue_flush(sdkp->disk->queue, flush);
+ sd_set_flush_flag(sdkp);
+ max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+ sdkp->max_xfer_blocks);
+ max_xfer <<= ilog2(sdp->sector_size) - 9;
+ blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
set_capacity(disk, sdkp->capacity);
sd_config_write_same(sdkp);
kfree(buffer);
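The new clamp combines the queue's existing hardware limit with the device-reported sdkp->max_xfer_blocks and converts the result from logical blocks to 512-byte sectors before calling blk_queue_max_hw_sectors(). A small arithmetic sketch of that conversion with invented numbers, min_not_zero() open-coded for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int queue_limit = 65535;	/* queue_max_hw_sectors(), in 512-byte sectors */
	unsigned int max_xfer_blocks = 8192;	/* e.g. from the Block Limits VPD page */
	unsigned int shift = 3;			/* ilog2(4096-byte logical block) - 9 */
	unsigned int max_xfer;

	/* min_not_zero(a, b): the smaller of the two, ignoring zero operands */
	max_xfer = queue_limit;
	if (max_xfer_blocks && (!max_xfer || max_xfer_blocks < max_xfer))
		max_xfer = max_xfer_blocks;

	max_xfer <<= shift;			/* logical blocks -> 512-byte sectors */
	printf("blk_queue_max_hw_sectors(q, %u) /* %u KiB */\n", max_xfer, max_xfer / 2);
	return 0;
}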
@@ -3208,12 +3256,14 @@ static int __init init_sd(void)
0, 0, NULL);
if (!sd_cdb_cache) {
printk(KERN_ERR "sd: can't init extended cdb cache\n");
+ err = -ENOMEM;
goto err_out_class;
}
sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
if (!sd_cdb_pool) {
printk(KERN_ERR "sd: can't init extended cdb pool\n");
+ err = -ENOMEM;
goto err_out_cache;
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 620871efbf0a..4c3ab8377fd3 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -44,6 +44,8 @@ enum {
};
enum {
+ SD_DEF_XFER_BLOCKS = 0xffff,
+ SD_MAX_XFER_BLOCKS = 0xffffffff,
SD_MAX_WS10_BLOCKS = 0xffff,
SD_MAX_WS16_BLOCKS = 0x7fffff,
};
@@ -64,6 +66,7 @@ struct scsi_disk {
struct gendisk *disk;
atomic_t openers;
sector_t capacity; /* size in 512-byte sectors */
+ u32 max_xfer_blocks;
u32 max_ws_blocks;
u32 max_unmap_blocks;
u32 unmap_granularity;
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 53268aaba559..01cf88888797 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -7,9 +7,7 @@
* Original driver (sg.c):
* Copyright (C) 1992 Lawrence Foard
* Version 2 and 3 extensions to driver:
- * Copyright (C) 1998 - 2005 Douglas Gilbert
- *
- * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
+ * Copyright (C) 1998 - 2014 Douglas Gilbert
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,11 +16,11 @@
*
*/
-static int sg_version_num = 30534; /* 2 digits for each component */
-#define SG_VERSION_STR "3.5.34"
+static int sg_version_num = 30536; /* 2 digits for each component */
+#define SG_VERSION_STR "3.5.36"
/*
- * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
+ * D. P. Gilbert (dgilbert@interlog.com), notes:
* - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
* the kernel/module needs to be built with CONFIG_SCSI_LOGGING
* (otherwise the macros compile to empty statements).
@@ -51,6 +49,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/mutex.h>
+#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include "scsi.h"
@@ -64,7 +63,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */
#ifdef CONFIG_SCSI_PROC_FS
#include <linux/proc_fs.h>
-static char *sg_version_date = "20061027";
+static char *sg_version_date = "20140603";
static int sg_proc_init(void);
static void sg_proc_cleanup(void);
@@ -74,6 +73,12 @@ static void sg_proc_cleanup(void);
#define SG_MAX_DEVS 32768
+/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30); however, the type
+ * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
+ * than 16 bytes are "variable length", whose length is a multiple of 4.
+ */
+#define SG_MAX_CDB_SIZE 252
+
/*
* Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
* Then when using 32 bit integers x * m may overflow during the calculation.
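With SG_MAX_CDB_SIZE raised to 252 (and the long_cmdp allocation added to sg_start_req() later in this patch), hp->cmd_len in the v3 interface can now exceed the 16-byte BLK_MAX_CDB. For reference, a routine user-space SG_IO call through this driver, here issuing an ordinary 6-byte INQUIRY; "/dev/sg0" and the buffer sizes are arbitrary example values:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96-byte allocation length */
	unsigned char resp[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sg0", O_RDWR);			/* example device node */

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* marks this as a v3 (or later) header */
	hdr.cmd_len = sizeof(cdb);		/* may be up to SG_MAX_CDB_SIZE with this patch */
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxfer_len = sizeof(resp);
	hdr.dxferp = resp;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 5000;			/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) == 0)
		printf("vendor: %.8s\n", (const char *)(resp + 8));
	close(fd);
	return 0;
}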
@@ -102,18 +107,16 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
#define SG_SECTOR_SZ 512
-static int sg_add(struct device *, struct class_interface *);
-static void sg_remove(struct device *, struct class_interface *);
-
-static DEFINE_SPINLOCK(sg_open_exclusive_lock);
+static int sg_add_device(struct device *, struct class_interface *);
+static void sg_remove_device(struct device *, struct class_interface *);
static DEFINE_IDR(sg_index_idr);
static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
file descriptor list for device */
static struct class_interface sg_interface = {
- .add_dev = sg_add,
- .remove_dev = sg_remove,
+ .add_dev = sg_add_device,
+ .remove_dev = sg_remove_device,
};
typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
@@ -146,8 +149,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
- /* sfd_siblings is protected by sg_index_lock */
- struct list_head sfd_siblings;
+ struct list_head sfd_siblings; /* protected by device's sfd_lock */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -161,7 +163,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
char low_dma; /* as in parent but possibly overridden to 1 */
char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
- char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
+ unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
struct kref f_ref;
@@ -170,14 +172,15 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device;
- wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
+ wait_queue_head_t open_wait; /* queue open() when O_EXCL present */
+ struct mutex open_rel_lock; /* held when in open() or release() */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
- /* sfds is protected by sg_index_lock */
struct list_head sfds;
- volatile char detached; /* 0->attached, 1->detached pending removal */
- /* exclude protected by sg_open_exclusive_lock */
- char exclude; /* opened for exclusive access */
+ rwlock_t sfd_lock; /* protect access to sfd list */
+ atomic_t detaching; /* 0->device usable, 1->device detaching */
+ bool exclude; /* 1->open(O_EXCL) succeeded and is active */
+ int open_cnt; /* count of opens (perhaps < num(sfds) ) */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
@@ -197,24 +200,28 @@ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking);
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
-static void sg_remove_scat(Sg_scatter_hold * schp);
+static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp);
static void sg_build_reserve(Sg_fd * sfp, int req_size);
static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
-static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
+static Sg_fd *sg_add_sfp(Sg_device * sdp);
static void sg_remove_sfp(struct kref *);
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
-static void sg_put_dev(Sg_device *sdp);
+static void sg_device_destroy(struct kref *kref);
#define SZ_SG_HEADER sizeof(struct sg_header)
#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
#define SZ_SG_IOVEC sizeof(sg_iovec_t)
#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
+#define sg_printk(prefix, sdp, fmt, a...) \
+ sdev_printk(prefix, (sdp)->device, "[%s] " fmt, \
+ (sdp)->disk->disk_name, ##a)
+
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = filp->private_data;
@@ -225,38 +232,43 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
-static int get_exclude(Sg_device *sdp)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&sg_open_exclusive_lock, flags);
- ret = sdp->exclude;
- spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
- return ret;
-}
-
-static int set_exclude(Sg_device *sdp, char val)
+static int
+open_wait(Sg_device *sdp, int flags)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sg_open_exclusive_lock, flags);
- sdp->exclude = val;
- spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
- return val;
-}
+ int retval = 0;
-static int sfds_list_empty(Sg_device *sdp)
-{
- unsigned long flags;
- int ret;
+ if (flags & O_EXCL) {
+ while (sdp->open_cnt > 0) {
+ mutex_unlock(&sdp->open_rel_lock);
+ retval = wait_event_interruptible(sdp->open_wait,
+ (atomic_read(&sdp->detaching) ||
+ !sdp->open_cnt));
+ mutex_lock(&sdp->open_rel_lock);
+
+ if (retval) /* -ERESTARTSYS */
+ return retval;
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ }
+ } else {
+ while (sdp->exclude) {
+ mutex_unlock(&sdp->open_rel_lock);
+ retval = wait_event_interruptible(sdp->open_wait,
+ (atomic_read(&sdp->detaching) ||
+ !sdp->exclude));
+ mutex_lock(&sdp->open_rel_lock);
+
+ if (retval) /* -ERESTARTSYS */
+ return retval;
+ if (atomic_read(&sdp->detaching))
+ return -ENODEV;
+ }
+ }
- read_lock_irqsave(&sg_index_lock, flags);
- ret = list_empty(&sdp->sfds);
- read_unlock_irqrestore(&sg_index_lock, flags);
- return ret;
+ return retval;
}
+/* Returns 0 on success, else a negated errno value */
static int
sg_open(struct inode *inode, struct file *filp)
{
@@ -265,17 +277,17 @@ sg_open(struct inode *inode, struct file *filp)
struct request_queue *q;
Sg_device *sdp;
Sg_fd *sfp;
- int res;
int retval;
nonseekable_open(inode, filp);
- SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
+ if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE)))
+ return -EPERM; /* Can't lock it with read only access */
sdp = sg_get_dev(dev);
- if (IS_ERR(sdp)) {
- retval = PTR_ERR(sdp);
- sdp = NULL;
- goto sg_put;
- }
+ if (IS_ERR(sdp))
+ return PTR_ERR(sdp);
+
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_open: flags=0x%x\n", flags));
/* This driver's module count bumped by fops_get in <linux/fs.h> */
/* Prevent the device driver from vanishing while we sleep */
@@ -287,6 +299,9 @@ sg_open(struct inode *inode, struct file *filp)
if (retval)
goto sdp_put;
+ /* scsi_block_when_processing_errors() may block so bypass
+ * check if O_NONBLOCK. Permits SCSI commands to be issued
+ * during error recovery. Tread carefully. */
if (!((flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device))) {
retval = -ENXIO;
@@ -294,65 +309,65 @@ sg_open(struct inode *inode, struct file *filp)
goto error_out;
}
- if (flags & O_EXCL) {
- if (O_RDONLY == (flags & O_ACCMODE)) {
- retval = -EPERM; /* Can't lock it with read only access */
- goto error_out;
- }
- if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
- retval = -EBUSY;
- goto error_out;
- }
- res = wait_event_interruptible(sdp->o_excl_wait,
- ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
- if (res) {
- retval = res; /* -ERESTARTSYS because signal hit process */
- goto error_out;
- }
- } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
- if (flags & O_NONBLOCK) {
- retval = -EBUSY;
- goto error_out;
- }
- res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
- if (res) {
- retval = res; /* -ERESTARTSYS because signal hit process */
- goto error_out;
+ mutex_lock(&sdp->open_rel_lock);
+ if (flags & O_NONBLOCK) {
+ if (flags & O_EXCL) {
+ if (sdp->open_cnt > 0) {
+ retval = -EBUSY;
+ goto error_mutex_locked;
+ }
+ } else {
+ if (sdp->exclude) {
+ retval = -EBUSY;
+ goto error_mutex_locked;
+ }
}
+ } else {
+ retval = open_wait(sdp, flags);
+ if (retval) /* -ERESTARTSYS or -ENODEV */
+ goto error_mutex_locked;
}
- if (sdp->detached) {
- retval = -ENODEV;
- goto error_out;
- }
- if (sfds_list_empty(sdp)) { /* no existing opens on this device */
+
+ /* N.B. at this point we are holding the open_rel_lock */
+ if (flags & O_EXCL)
+ sdp->exclude = true;
+
+ if (sdp->open_cnt < 1) { /* no existing opens */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
- if ((sfp = sg_add_sfp(sdp, dev)))
- filp->private_data = sfp;
- else {
- if (flags & O_EXCL) {
- set_exclude(sdp, 0); /* undo if error */
- wake_up_interruptible(&sdp->o_excl_wait);
- }
- retval = -ENOMEM;
- goto error_out;
+ sfp = sg_add_sfp(sdp);
+ if (IS_ERR(sfp)) {
+ retval = PTR_ERR(sfp);
+ goto out_undo;
}
+
+ filp->private_data = sfp;
+ sdp->open_cnt++;
+ mutex_unlock(&sdp->open_rel_lock);
+
retval = 0;
-error_out:
- if (retval) {
- scsi_autopm_put_device(sdp->device);
-sdp_put:
- scsi_device_put(sdp->device);
- }
sg_put:
- if (sdp)
- sg_put_dev(sdp);
+ kref_put(&sdp->d_ref, sg_device_destroy);
return retval;
+
+out_undo:
+ if (flags & O_EXCL) {
+ sdp->exclude = false; /* undo if error */
+ wake_up_interruptible(&sdp->open_wait);
+ }
+error_mutex_locked:
+ mutex_unlock(&sdp->open_rel_lock);
+error_out:
+ scsi_autopm_put_device(sdp->device);
+sdp_put:
+ scsi_device_put(sdp->device);
+ goto sg_put;
}
-/* Following function was formerly called 'sg_close' */
+/* Release resources associated with a successful sg_open()
+ * Returns 0 on success, else a negated errno value */
static int
sg_release(struct inode *inode, struct file *filp)
{
@@ -361,13 +376,22 @@ sg_release(struct inode *inode, struct file *filp)
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
-
- set_exclude(sdp, 0);
- wake_up_interruptible(&sdp->o_excl_wait);
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n"));
+ mutex_lock(&sdp->open_rel_lock);
scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp);
+ sdp->open_cnt--;
+
+ /* possibly many open()s waiting on exclude clearing, start many;
+ * only open(O_EXCL)s wait on 0==open_cnt so only start one */
+ if (sdp->exclude) {
+ sdp->exclude = false;
+ wake_up_interruptible_all(&sdp->open_wait);
+ } else if (0 == sdp->open_cnt) {
+ wake_up_interruptible(&sdp->open_wait);
+ }
+ mutex_unlock(&sdp->open_rel_lock);
return 0;
}
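The reworked open/release path gives O_EXCL the usual semantics: an exclusive open waits (or fails with EBUSY under O_NONBLOCK) while other descriptors are open, and sg_release() above wakes the sleepers once the counts allow it. A user-space sketch of the non-blocking case; "/dev/sg0" is an example device:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd1 = open("/dev/sg0", O_RDWR);				/* ordinary open */
	int fd2 = open("/dev/sg0", O_RDWR | O_EXCL | O_NONBLOCK);	/* exclusive, non-blocking */

	if (fd1 >= 0 && fd2 < 0 && errno == EBUSY)
		printf("exclusive open refused while another fd is open\n");
	if (fd1 >= 0)
		close(fd1);	/* release wakes any opener sleeping on open_wait */
	if (fd2 >= 0)
		close(fd2);
	return 0;
}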
@@ -384,8 +408,8 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
- sdp->disk->disk_name, (int) count));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_read: count=%d\n", (int) count));
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
@@ -419,7 +443,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
}
srp = sg_get_rq_mark(sfp, req_pack_id);
if (!srp) { /* now wait on packet to arrive */
- if (sdp->detached) {
+ if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
@@ -428,9 +452,9 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
goto free_old_hdr;
}
retval = wait_event_interruptible(sfp->read_wait,
- (sdp->detached ||
+ (atomic_read(&sdp->detaching) ||
(srp = sg_get_rq_mark(sfp, req_pack_id))));
- if (sdp->detached) {
+ if (atomic_read(&sdp->detaching)) {
retval = -ENODEV;
goto free_old_hdr;
}
@@ -566,13 +590,13 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
Sg_request *srp;
struct sg_header old_hdr;
sg_io_hdr_t *hp;
- unsigned char cmnd[MAX_COMMAND_SIZE];
+ unsigned char cmnd[SG_MAX_CDB_SIZE];
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
- sdp->disk->disk_name, (int) count));
- if (sdp->detached)
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_write: count=%d\n", (int) count));
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!((filp->f_flags & O_NONBLOCK) ||
scsi_block_when_processing_errors(sdp->device)))
@@ -592,18 +616,13 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
return -EIO; /* The minimum scsi command length is 6 bytes. */
if (!(srp = sg_add_request(sfp))) {
- SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp,
+ "sg_write: queue full\n"));
return -EDOM;
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
if (sfp->next_cmd_len > 0) {
- if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
- SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
- sfp->next_cmd_len = 0;
- sg_remove_request(sfp, srp);
- return -EIO;
- }
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
} else {
@@ -611,7 +630,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
- SCSI_LOG_TIMEOUT(4, printk(
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
input_size = count - cmd_size;
@@ -675,7 +694,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
int k;
Sg_request *srp;
sg_io_hdr_t *hp;
- unsigned char cmnd[MAX_COMMAND_SIZE];
+ unsigned char cmnd[SG_MAX_CDB_SIZE];
int timeout;
unsigned long ul_timeout;
@@ -686,7 +705,8 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
if (!(srp = sg_add_request(sfp))) {
- SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_new_write: queue full\n"));
return -EDOM;
}
srp->sg_io_owned = sg_io_owned;
@@ -743,7 +763,7 @@ static int
sg_common_write(Sg_fd * sfp, Sg_request * srp,
unsigned char *cmnd, int timeout, int blocking)
{
- int k, data_dir;
+ int k, data_dir, at_head;
Sg_device *sdp = sfp->parentdp;
sg_io_hdr_t *hp = &srp->header;
@@ -755,16 +775,18 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
hp->host_status = 0;
hp->driver_status = 0;
hp->resid = 0;
- SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
- (int) cmnd[0], (int) hp->cmd_len));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
+ (int) cmnd[0], (int) hp->cmd_len));
k = sg_start_req(srp, cmnd);
if (k) {
- SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_common_write: start_req err=%d\n", k));
sg_finish_rem_req(srp);
return k; /* probably out of space --> ENOMEM */
}
- if (sdp->detached) {
+ if (atomic_read(&sdp->detaching)) {
if (srp->bio)
blk_end_request_all(srp->rq, -EIO);
sg_finish_rem_req(srp);
@@ -787,11 +809,16 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
break;
}
hp->duration = jiffies_to_msecs(jiffies);
+ if (hp->interface_id != '\0' && /* v3 (or later) interface */
+ (SG_FLAG_Q_AT_TAIL & hp->flags))
+ at_head = 0;
+ else
+ at_head = 1;
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
- srp->rq, 1, sg_rq_end_io);
+ srp->rq, at_head, sg_rq_end_io);
return 0;
}
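sg_common_write() now honours tail queuing when a v3 (or later) header carries SG_FLAG_Q_AT_TAIL; head-of-queue insertion remains the default. A short helper showing how a caller of the SG_IO example above would request it, assuming the SG_FLAG_Q_AT_TAIL definition from this series is present in the installed <scsi/sg.h>:

#include <scsi/sg.h>

/* request tail-of-queue insertion for a v3 sg_io_hdr before SG_IO is issued */
static void request_tail_queuing(struct sg_io_hdr *hdr)
{
	hdr->flags |= SG_FLAG_Q_AT_TAIL;	/* omit the flag to keep head-of-queue */
}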
@@ -806,6 +833,15 @@ static int srp_done(Sg_fd *sfp, Sg_request *srp)
return ret;
}
+static int max_sectors_bytes(struct request_queue *q)
+{
+ unsigned int max_sectors = queue_max_sectors(q);
+
+ max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9);
+
+ return max_sectors << 9;
+}
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
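max_sectors_bytes() replaces the open-coded queue_max_sectors() * 512 in the ioctl cases below and clamps the result so it still fits in the int these ioctls exchange with user space. A quick user-space check of the value as reported through SG_GET_RESERVED_SIZE; the device path is an example:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
	int fd = open("/dev/sg0", O_RDONLY | O_NONBLOCK);	/* example device node */
	int reserved = 0;

	if (fd < 0)
		return 1;
	if (ioctl(fd, SG_GET_RESERVED_SIZE, &reserved) == 0)
		printf("reserved buffer: %d bytes\n", reserved);	/* capped at max_sectors_bytes() */
	close(fd);
	return 0;
}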
@@ -820,13 +856,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
- sdp->disk->disk_name, (int) cmd_in));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_ioctl: cmd=0x%x\n", (int) cmd_in));
read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
switch (cmd_in) {
case SG_IO:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
if (!scsi_block_when_processing_errors(sdp->device))
return -ENXIO;
@@ -837,8 +873,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
- (srp_done(sfp, srp) || sdp->detached));
- if (sdp->detached)
+ (srp_done(sfp, srp) || atomic_read(&sdp->detaching)));
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
write_lock_irq(&sfp->rq_list_lock);
if (srp->done) {
@@ -873,11 +909,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
sfp->low_dma = 1;
if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
val = (int) sfp->reserve.bufflen;
- sg_remove_scat(&sfp->reserve);
+ sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
} else {
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
sfp->low_dma = sdp->device->host->unchecked_isa_dma;
}
@@ -890,7 +926,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
else {
sg_scsi_id_t __user *sg_idp = p;
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
__put_user((int) sdp->device->host->host_no,
&sg_idp->host_no);
@@ -945,17 +981,17 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (val < 0)
return -EINVAL;
val = min_t(int, val,
- queue_max_sectors(sdp->device->request_queue) * 512);
+ max_sectors_bytes(sdp->device->request_queue));
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
return -EBUSY;
- sg_remove_scat(&sfp->reserve);
+ sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
}
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
- queue_max_sectors(sdp->device->request_queue) * 512);
+ max_sectors_bytes(sdp->device->request_queue));
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
@@ -1032,11 +1068,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return result;
}
case SG_EMULATED_HOST:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
return put_user(sdp->device->host->hostt->emulated, ip);
case SG_SCSI_RESET:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
if (filp->f_flags & O_NONBLOCK) {
if (scsi_host_in_recovery(sdp->device->host))
@@ -1069,7 +1105,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return (scsi_reset_provider(sdp->device, val) ==
SUCCESS) ? 0 : -EIO;
case SCSI_IOCTL_SEND_COMMAND:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
if (read_only) {
unsigned char opcode = WRITE_6;
@@ -1091,11 +1127,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
case SCSI_IOCTL_GET_BUS_NUMBER:
case SCSI_IOCTL_PROBE_HOST:
case SG_GET_TRANSFORM:
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
return -ENODEV;
return scsi_ioctl(sdp->device, cmd_in, p);
case BLKSECTGET:
- return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
+ return put_user(max_sectors_bytes(sdp->device->request_queue),
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
@@ -1165,15 +1201,15 @@ sg_poll(struct file *filp, poll_table * wait)
}
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- if (sdp->detached)
+ if (atomic_read(&sdp->detaching))
res |= POLLHUP;
else if (!sfp->cmd_q) {
if (0 == count)
res |= POLLOUT | POLLWRNORM;
} else if (count < SG_MAX_QUEUE)
res |= POLLOUT | POLLWRNORM;
- SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
- sdp->disk->disk_name, (int) res));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_poll: res=0x%x\n", (int) res));
return res;
}
@@ -1185,8 +1221,8 @@ sg_fasync(int fd, struct file *filp, int mode)
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
- SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
- sdp->disk->disk_name, mode));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_fasync: mode=%d\n", mode));
return fasync_helper(fd, filp, mode, &sfp->async_qp);
}
@@ -1205,8 +1241,9 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
offset = vmf->pgoff << PAGE_SHIFT;
if (offset >= rsv_schp->bufflen)
return VM_FAULT_SIGBUS;
- SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
- offset, rsv_schp->k_use_sg));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_vma_fault: offset=%lu, scatg=%d\n",
+ offset, rsv_schp->k_use_sg));
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
@@ -1241,8 +1278,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
req_sz = vma->vm_end - vma->vm_start;
- SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
- (void *) vma->vm_start, (int) req_sz));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_mmap starting, vm_start=%p, len=%d\n",
+ (void *) vma->vm_start, (int) req_sz));
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
@@ -1264,7 +1302,8 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-static void sg_rq_end_io_usercontext(struct work_struct *work)
+static void
+sg_rq_end_io_usercontext(struct work_struct *work)
{
struct sg_request *srp = container_of(work, struct sg_request, ew.work);
struct sg_fd *sfp = srp->parentfp;
@@ -1277,7 +1316,8 @@ static void sg_rq_end_io_usercontext(struct work_struct *work)
* This function is a "bottom half" handler that is called by the mid
* level when a command is completed (or has failed).
*/
-static void sg_rq_end_io(struct request *rq, int uptodate)
+static void
+sg_rq_end_io(struct request *rq, int uptodate)
{
struct sg_request *srp = rq->end_io_data;
Sg_device *sdp;
@@ -1295,15 +1335,16 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
return;
sdp = sfp->parentdp;
- if (unlikely(sdp->detached))
- printk(KERN_INFO "sg_rq_end_io: device detached\n");
+ if (unlikely(atomic_read(&sdp->detaching)))
+ pr_info("%s: device detaching\n", __func__);
sense = rq->sense;
result = rq->errors;
resid = rq->resid_len;
- SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
- sdp->disk->disk_name, srp->header.pack_id, result));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
+ "sg_cmd_done: pack_id=%d, res=0x%x\n",
+ srp->header.pack_id, result));
srp->header.resid = resid;
ms = jiffies_to_msecs(jiffies);
srp->header.duration = (ms > srp->header.duration) ?
@@ -1319,7 +1360,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
if ((sdp->sgdebug > 0) &&
((CHECK_CONDITION == srp->header.masked_status) ||
(COMMAND_TERMINATED == srp->header.masked_status)))
- __scsi_print_sense("sg_cmd_done", sense,
+ __scsi_print_sense(__func__, sense,
SCSI_SENSE_BUFFERSIZE);
/* Following if statement is a patch supplied by Eric Youngdale */
@@ -1378,7 +1419,8 @@ static struct class *sg_sysfs_class;
static int sg_sysfs_valid = 0;
-static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
+static Sg_device *
+sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
struct request_queue *q = scsidp->request_queue;
Sg_device *sdp;
@@ -1388,7 +1430,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
if (!sdp) {
- printk(KERN_WARNING "kmalloc Sg_device failure\n");
+ sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
+ "failure\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -1403,20 +1446,25 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
scsidp->type, SG_MAX_DEVS - 1);
error = -ENODEV;
} else {
- printk(KERN_WARNING
- "idr allocation Sg_device failure: %d\n", error);
+ sdev_printk(KERN_WARNING, scsidp, "%s: idr "
+ "allocation Sg_device failure: %d\n",
+ __func__, error);
}
goto out_unlock;
}
k = error;
- SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
+ SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
+ "sg_alloc: dev=%d \n", k));
sprintf(disk->disk_name, "sg%d", k);
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
+ mutex_init(&sdp->open_rel_lock);
INIT_LIST_HEAD(&sdp->sfds);
- init_waitqueue_head(&sdp->o_excl_wait);
+ init_waitqueue_head(&sdp->open_wait);
+ atomic_set(&sdp->detaching, 0);
+ rwlock_init(&sdp->sfd_lock);
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
@@ -1434,7 +1482,7 @@ out_unlock:
}
static int
-sg_add(struct device *cl_dev, struct class_interface *cl_intf)
+sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
struct gendisk *disk;
@@ -1445,7 +1493,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
disk = alloc_disk(1);
if (!disk) {
- printk(KERN_WARNING "alloc_disk failed\n");
+ pr_warn("%s: alloc_disk failed\n", __func__);
return -ENOMEM;
}
disk->major = SCSI_GENERIC_MAJOR;
@@ -1453,7 +1501,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
error = -ENOMEM;
cdev = cdev_alloc();
if (!cdev) {
- printk(KERN_WARNING "cdev_alloc failed\n");
+ pr_warn("%s: cdev_alloc failed\n", __func__);
goto out;
}
cdev->owner = THIS_MODULE;
@@ -1461,7 +1509,7 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
sdp = sg_alloc(disk, scsidp);
if (IS_ERR(sdp)) {
- printk(KERN_WARNING "sg_alloc failed\n");
+ pr_warn("%s: sg_alloc failed\n", __func__);
error = PTR_ERR(sdp);
goto out;
}
@@ -1479,22 +1527,20 @@ sg_add(struct device *cl_dev, struct class_interface *cl_intf)
sdp->index),
sdp, "%s", disk->disk_name);
if (IS_ERR(sg_class_member)) {
- printk(KERN_ERR "sg_add: "
- "device_create failed\n");
+ pr_err("%s: device_create failed\n", __func__);
error = PTR_ERR(sg_class_member);
goto cdev_add_err;
}
error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
&sg_class_member->kobj, "generic");
if (error)
- printk(KERN_ERR "sg_add: unable to make symlink "
- "'generic' back to sg%d\n", sdp->index);
+ pr_err("%s: unable to make symlink 'generic' back "
+ "to sg%d\n", __func__, sdp->index);
} else
- printk(KERN_WARNING "sg_add: sg_sys Invalid\n");
+ pr_warn("%s: sg_sys Invalid\n", __func__);
- sdev_printk(KERN_NOTICE, scsidp,
- "Attached scsi generic sg%d type %d\n", sdp->index,
- scsidp->type);
+ sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
+ "type %d\n", sdp->index, scsidp->type);
dev_set_drvdata(cl_dev, sdp);
@@ -1513,7 +1559,8 @@ out:
return error;
}
-static void sg_device_destroy(struct kref *kref)
+static void
+sg_device_destroy(struct kref *kref)
{
struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
unsigned long flags;
@@ -1528,40 +1575,45 @@ static void sg_device_destroy(struct kref *kref)
write_unlock_irqrestore(&sg_index_lock, flags);
SCSI_LOG_TIMEOUT(3,
- printk("sg_device_destroy: %s\n",
- sdp->disk->disk_name));
+ sg_printk(KERN_INFO, sdp, "sg_device_destroy\n"));
put_disk(sdp->disk);
kfree(sdp);
}
-static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
+static void
+sg_remove_device(struct device *cl_dev, struct class_interface *cl_intf)
{
struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
Sg_device *sdp = dev_get_drvdata(cl_dev);
unsigned long iflags;
Sg_fd *sfp;
+ int val;
- if (!sdp || sdp->detached)
+ if (!sdp)
return;
+ /* want sdp->detaching non-zero as soon as possible */
+ val = atomic_inc_return(&sdp->detaching);
+ if (val > 1)
+ return; /* only want to do following once per device */
- SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "%s\n", __func__));
- /* Need a write lock to set sdp->detached. */
- write_lock_irqsave(&sg_index_lock, iflags);
- sdp->detached = 1;
+ read_lock_irqsave(&sdp->sfd_lock, iflags);
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
- wake_up_interruptible(&sfp->read_wait);
+ wake_up_interruptible_all(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
- write_unlock_irqrestore(&sg_index_lock, iflags);
+ wake_up_interruptible_all(&sdp->open_wait);
+ read_unlock_irqrestore(&sdp->sfd_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
cdev_del(sdp->cdev);
sdp->cdev = NULL;
- sg_put_dev(sdp);
+ kref_put(&sdp->d_ref, sg_device_destroy);
}
module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
@@ -1631,7 +1683,8 @@ exit_sg(void)
idr_destroy(&sg_index_idr);
}
-static int sg_start_req(Sg_request *srp, unsigned char *cmd)
+static int
+sg_start_req(Sg_request *srp, unsigned char *cmd)
{
int res;
struct request *rq;
@@ -1645,15 +1698,28 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
struct request_queue *q = sfp->parentdp->device->request_queue;
struct rq_map_data *md, map_data;
int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;
+ unsigned char *long_cmdp = NULL;
- SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
- dxfer_len));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_start_req: dxfer_len=%d\n",
+ dxfer_len));
+
+ if (hp->cmd_len > BLK_MAX_CDB) {
+ long_cmdp = kzalloc(hp->cmd_len, GFP_KERNEL);
+ if (!long_cmdp)
+ return -ENOMEM;
+ }
rq = blk_get_request(q, rw, GFP_ATOMIC);
- if (!rq)
+ if (!rq) {
+ kfree(long_cmdp);
return -ENOMEM;
+ }
blk_rq_set_block_pc(rq);
+
+ if (hp->cmd_len > BLK_MAX_CDB)
+ rq->cmd = long_cmdp;
memcpy(rq->cmd, cmd, hp->cmd_len);
rq->cmd_len = hp->cmd_len;
@@ -1726,25 +1792,30 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)
return res;
}
-static int sg_finish_rem_req(Sg_request * srp)
+static int
+sg_finish_rem_req(Sg_request *srp)
{
int ret = 0;
Sg_fd *sfp = srp->parentfp;
Sg_scatter_hold *req_schp = &srp->data;
- SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_finish_rem_req: res_used=%d\n",
+ (int) srp->res_used));
if (srp->rq) {
if (srp->bio)
ret = blk_rq_unmap_user(srp->bio);
+ if (srp->rq->cmd != srp->rq->__cmd)
+ kfree(srp->rq->cmd);
blk_put_request(srp->rq);
}
if (srp->res_used)
sg_unlink_reserve(sfp, srp);
else
- sg_remove_scat(req_schp);
+ sg_remove_scat(sfp, req_schp);
sg_remove_request(sfp, srp);
@@ -1778,8 +1849,9 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
++blk_size; /* don't know why */
/* round request up to next highest SG_SECTOR_SZ byte boundary */
blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
- SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
- buff_size, blk_size));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_indirect: buff_size=%d, blk_size=%d\n",
+ buff_size, blk_size));
/* N.B. ret_sz carried into this block ... */
mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
@@ -1822,14 +1894,16 @@ retry:
}
}
- SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
- "ret_sz=%d\n", k, num, ret_sz));
+ SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n",
+ k, num, ret_sz));
} /* end of for loop */
schp->page_order = order;
schp->k_use_sg = k;
- SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
- "rem_sz=%d\n", k, rem_sz));
+ SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n",
+ k, rem_sz));
schp->bufflen = blk_size;
if (rem_sz > 0) /* must have failed */
@@ -1846,17 +1920,19 @@ out:
}
static void
-sg_remove_scat(Sg_scatter_hold * schp)
+sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp)
{
- SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
if (schp->pages && schp->sglist_len > 0) {
if (!schp->dio_in_use) {
int k;
for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
- SCSI_LOG_TIMEOUT(5, printk(
- "sg_remove_scat: k=%d, pg=0x%p\n",
- k, schp->pages[k]));
+ SCSI_LOG_TIMEOUT(5,
+ sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_remove_scat: k=%d, pg=0x%p\n",
+ k, schp->pages[k]));
__free_pages(schp->pages[k], schp->page_order);
}
@@ -1872,8 +1948,9 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
Sg_scatter_hold *schp = &srp->data;
int k, num;
- SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
- num_read_xfer));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
+ "sg_read_oxfer: num_read_xfer=%d\n",
+ num_read_xfer));
if ((!outp) || (num_read_xfer <= 0))
return 0;
@@ -1903,14 +1980,15 @@ sg_build_reserve(Sg_fd * sfp, int req_size)
{
Sg_scatter_hold *schp = &sfp->reserve;
- SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_build_reserve: req_size=%d\n", req_size));
do {
if (req_size < PAGE_SIZE)
req_size = PAGE_SIZE;
if (0 == sg_build_indirect(schp, sfp, req_size))
return;
else
- sg_remove_scat(schp);
+ sg_remove_scat(sfp, schp);
req_size >>= 1; /* divide by 2 */
} while (req_size > (PAGE_SIZE / 2));
}
@@ -1923,7 +2001,8 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
int k, num, rem;
srp->res_used = 1;
- SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_link_reserve: size=%d\n", size));
rem = size;
num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
@@ -1941,7 +2020,8 @@ sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
}
if (k >= rsv_schp->k_use_sg)
- SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
+ SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ "sg_link_reserve: BAD size\n"));
}
static void
@@ -1949,8 +2029,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
Sg_scatter_hold *req_schp = &srp->data;
- SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
- (int) req_schp->k_use_sg));
+ SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp,
+ "sg_unlink_reserve: req->k_use_sg=%d\n",
+ (int) req_schp->k_use_sg));
req_schp->k_use_sg = 0;
req_schp->bufflen = 0;
req_schp->pages = NULL;
@@ -2055,7 +2136,7 @@ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
}
static Sg_fd *
-sg_add_sfp(Sg_device * sdp, int dev)
+sg_add_sfp(Sg_device * sdp)
{
Sg_fd *sfp;
unsigned long iflags;
@@ -2063,7 +2144,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
@@ -2077,25 +2158,33 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
- write_lock_irqsave(&sg_index_lock, iflags);
+ write_lock_irqsave(&sdp->sfd_lock, iflags);
+ if (atomic_read(&sdp->detaching)) {
+ write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ return ERR_PTR(-ENODEV);
+ }
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
- write_unlock_irqrestore(&sg_index_lock, iflags);
- SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
+ write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
- queue_max_sectors(sdp->device->request_queue) * 512);
+ max_sectors_bytes(sdp->device->request_queue));
sg_build_reserve(sfp, bufflen);
- SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
- sfp->reserve.bufflen, sfp->reserve.k_use_sg));
+ SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+ "sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
+ sfp->reserve.bufflen,
+ sfp->reserve.k_use_sg));
kref_get(&sdp->d_ref);
__module_get(THIS_MODULE);
return sfp;
}
-static void sg_remove_sfp_usercontext(struct work_struct *work)
+static void
+sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
@@ -2105,34 +2194,32 @@ static void sg_remove_sfp_usercontext(struct work_struct *work)
sg_finish_rem_req(sfp->headrp);
if (sfp->reserve.bufflen > 0) {
- SCSI_LOG_TIMEOUT(6,
- printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
+ SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
+ "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
(int) sfp->reserve.bufflen,
(int) sfp->reserve.k_use_sg));
- sg_remove_scat(&sfp->reserve);
+ sg_remove_scat(sfp, &sfp->reserve);
}
- SCSI_LOG_TIMEOUT(6,
- printk("sg_remove_sfp: %s, sfp=0x%p\n",
- sdp->disk->disk_name,
- sfp));
+ SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
+ "sg_remove_sfp: sfp=0x%p\n", sfp));
kfree(sfp);
scsi_device_put(sdp->device);
- sg_put_dev(sdp);
+ kref_put(&sdp->d_ref, sg_device_destroy);
module_put(THIS_MODULE);
}
-static void sg_remove_sfp(struct kref *kref)
+static void
+sg_remove_sfp(struct kref *kref)
{
struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
struct sg_device *sdp = sfp->parentdp;
unsigned long iflags;
- write_lock_irqsave(&sg_index_lock, iflags);
+ write_lock_irqsave(&sdp->sfd_lock, iflags);
list_del(&sfp->sfd_siblings);
- write_unlock_irqrestore(&sg_index_lock, iflags);
- wake_up_interruptible(&sdp->o_excl_wait);
+ write_unlock_irqrestore(&sdp->sfd_lock, iflags);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
@@ -2183,7 +2270,8 @@ static Sg_device *sg_lookup_dev(int dev)
return idr_find(&sg_index_idr, dev);
}
-static Sg_device *sg_get_dev(int dev)
+static Sg_device *
+sg_get_dev(int dev)
{
struct sg_device *sdp;
unsigned long flags;
@@ -2192,8 +2280,8 @@ static Sg_device *sg_get_dev(int dev)
sdp = sg_lookup_dev(dev);
if (!sdp)
sdp = ERR_PTR(-ENXIO);
- else if (sdp->detached) {
- /* If sdp->detached, then the refcount may already be 0, in
+ else if (atomic_read(&sdp->detaching)) {
+ /* If sdp->detaching, then the refcount may already be 0, in
* which case it would be a bug to do kref_get().
*/
sdp = ERR_PTR(-ENODEV);
@@ -2204,11 +2292,6 @@ static Sg_device *sg_get_dev(int dev)
return sdp;
}
-static void sg_put_dev(struct sg_device *sdp)
-{
- kref_put(&sdp->d_ref, sg_device_destroy);
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static struct proc_dir_entry *sg_proc_sgp = NULL;
@@ -2425,8 +2508,7 @@ static int sg_proc_single_open_version(struct inode *inode, struct file *file)
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
- seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
- "online\n");
+ seq_puts(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n");
return 0;
}
@@ -2482,16 +2564,19 @@ static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
- if (sdp && (scsidp = sdp->device) && (!sdp->detached))
- seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
+ if ((NULL == sdp) || (NULL == sdp->device) ||
+ (atomic_read(&sdp->detaching)))
+ seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
+ else {
+ scsidp = sdp->device;
+ seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
(int) scsidp->queue_depth,
- (int) scsidp->device_busy,
+ (int) atomic_read(&scsidp->device_busy),
(int) scsi_device_online(scsidp));
- else
- seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
+ }
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
@@ -2510,11 +2595,12 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
- if (sdp && (scsidp = sdp->device) && (!sdp->detached))
+ scsidp = sdp ? sdp->device : NULL;
+ if (sdp && scsidp && (!atomic_read(&sdp->detaching)))
seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
scsidp->vendor, scsidp->model, scsidp->rev);
else
- seq_printf(s, "<no active device>\n");
+ seq_puts(s, "<no active device>\n");
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
@@ -2559,12 +2645,12 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
else
cp = " ";
}
- seq_printf(s, cp);
+ seq_puts(s, cp);
blen = srp->data.bufflen;
usg = srp->data.k_use_sg;
- seq_printf(s, srp->done ?
- ((1 == srp->done) ? "rcv:" : "fin:")
- : "act:");
+ seq_puts(s, srp->done ?
+ ((1 == srp->done) ? "rcv:" : "fin:")
+ : "act:");
seq_printf(s, " id=%d blen=%d",
srp->header.pack_id, blen);
if (srp->done)
@@ -2580,7 +2666,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
(int) srp->data.cmd_opcode);
}
if (0 == m)
- seq_printf(s, " No requests active\n");
+ seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
}
@@ -2596,31 +2682,34 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
Sg_device *sdp;
unsigned long iflags;
- if (it && (0 == it->index)) {
- seq_printf(s, "max_active_device=%d(origin 1)\n",
- (int)it->max);
- seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
- }
+ if (it && (0 == it->index))
+ seq_printf(s, "max_active_device=%d def_reserved_size=%d\n",
+ (int)it->max, sg_big_buff);
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
- if (sdp && !list_empty(&sdp->sfds)) {
- struct scsi_device *scsidp = sdp->device;
-
+ if (NULL == sdp)
+ goto skip;
+ read_lock(&sdp->sfd_lock);
+ if (!list_empty(&sdp->sfds)) {
seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
- if (sdp->detached)
- seq_printf(s, "detached pending close ");
- else
- seq_printf
- (s, "scsi%d chan=%d id=%d lun=%d em=%d",
- scsidp->host->host_no,
- scsidp->channel, scsidp->id,
- scsidp->lun,
- scsidp->host->hostt->emulated);
- seq_printf(s, " sg_tablesize=%d excl=%d\n",
- sdp->sg_tablesize, get_exclude(sdp));
+ if (atomic_read(&sdp->detaching))
+ seq_puts(s, "detaching pending close ");
+ else if (sdp->device) {
+ struct scsi_device *scsidp = sdp->device;
+
+ seq_printf(s, "%d:%d:%d:%llu em=%d",
+ scsidp->host->host_no,
+ scsidp->channel, scsidp->id,
+ scsidp->lun,
+ scsidp->host->hostt->emulated);
+ }
+ seq_printf(s, " sg_tablesize=%d excl=%d open_cnt=%d\n",
+ sdp->sg_tablesize, sdp->exclude, sdp->open_cnt);
sg_proc_debug_helper(s, sdp);
}
+ read_unlock(&sdp->sfd_lock);
+skip:
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
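
The sg.c hunks above move the driver from the old sdp->detached flag and the global sg_index_lock to a per-device sfd_lock, an atomic detaching flag and a kref held by every open file descriptor: sg_add_sfp() now returns ERR_PTR(-ENODEV) instead of racing with device removal. A minimal user-space sketch of that admission check, using mock names and pthread/stdatomic stand-ins rather than the kernel primitives:

	#include <errno.h>
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	/* Mock device: sfd_lock, detaching and refcount loosely model
	 * sdp->sfd_lock, sdp->detaching and sdp->d_ref; none of this is
	 * kernel API. */
	struct mock_dev {
		pthread_rwlock_t sfd_lock;
		atomic_int detaching;
		atomic_int refcount;
	};

	static int dev_add_fd(struct mock_dev *dev)
	{
		pthread_rwlock_wrlock(&dev->sfd_lock);
		if (atomic_load(&dev->detaching)) {
			pthread_rwlock_unlock(&dev->sfd_lock);
			return -ENODEV;	/* what sg_add_sfp() now reports */
		}
		/* a real driver would list_add_tail() the new fd here */
		pthread_rwlock_unlock(&dev->sfd_lock);
		atomic_fetch_add(&dev->refcount, 1);	/* models kref_get() */
		return 0;
	}

	int main(void)
	{
		struct mock_dev dev = {
			.sfd_lock = PTHREAD_RWLOCK_INITIALIZER,
			.detaching = 0,
			.refcount = 1,
		};

		printf("open before detach: %d\n", dev_add_fd(&dev));
		atomic_store(&dev.detaching, 1); /* removal flips the flag */
		printf("open after detach:  %d\n", dev_add_fd(&dev));
		return 0;
	}

Build with "cc -pthread". The point is only the ordering: the flag is tested under the same lock that protects the fd list, so a detach cannot slip in between the check and the list insertion.
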
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 93cbd36c990b..7eeb93627beb 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -292,8 +292,8 @@ do_tur:
if (!cd->tur_changed) {
if (cd->get_event_changed) {
if (cd->tur_mismatch++ > 8) {
- sdev_printk(KERN_WARNING, cd->device,
- "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n");
+ sr_printk(KERN_WARNING, cd,
+ "GET_EVENT and TUR disagree continuously, suppress GET_EVENT events\n");
cd->ignore_get_event = true;
}
} else {
@@ -322,7 +322,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
struct scsi_cd *cd = scsi_cd(SCpnt->request->rq_disk);
#ifdef DEBUG
- printk("sr.c done: %x\n", result);
+ scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);
#endif
/*
@@ -385,10 +385,9 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
int block = 0, this_count, s_size;
struct scsi_cd *cd;
struct request *rq = SCpnt->request;
- struct scsi_device *sdp = SCpnt->device;
int ret;
- ret = scsi_setup_fs_cmnd(sdp, rq);
+ ret = scsi_init_io(SCpnt, GFP_ATOMIC);
if (ret != BLKPREP_OK)
goto out;
SCpnt = rq->special;
@@ -398,13 +397,14 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
* is used for a killable error condition */
ret = BLKPREP_KILL;
- SCSI_LOG_HLQUEUE(1, printk("Doing sr request, dev = %s, block = %d\n",
- cd->disk->disk_name, block));
+ SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
+ "Doing sr request, block = %d\n", block));
if (!cd->device || !scsi_device_online(cd->device)) {
- SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
- blk_rq_sectors(rq)));
- SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
+ "Finishing %u sectors\n", blk_rq_sectors(rq)));
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
+ "Retry with 0x%p\n", SCpnt));
goto out;
}
@@ -425,7 +425,8 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
if (!in_interrupt())
sr_set_blocklength(cd, 2048);
else
- printk("sr: can't switch blocksize: in interrupt\n");
+ scmd_printk(KERN_INFO, SCpnt,
+ "can't switch blocksize: in interrupt\n");
}
if (s_size != 512 && s_size != 1024 && s_size != 2048) {
@@ -434,14 +435,12 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
}
if (rq_data_dir(rq) == WRITE) {
- if (!cd->device->writeable)
+ if (!cd->writeable)
goto out;
SCpnt->cmnd[0] = WRITE_10;
- SCpnt->sc_data_direction = DMA_TO_DEVICE;
- cd->cdi.media_written = 1;
+ cd->cdi.media_written = 1;
} else if (rq_data_dir(rq) == READ) {
SCpnt->cmnd[0] = READ_10;
- SCpnt->sc_data_direction = DMA_FROM_DEVICE;
} else {
blk_dump_rq_flags(rq, "Unknown sr command");
goto out;
@@ -475,11 +474,11 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
- SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
- cd->cdi.name,
- (rq_data_dir(rq) == WRITE) ?
+ SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
+ "%s %d/%u 512 byte blocks.\n",
+ (rq_data_dir(rq) == WRITE) ?
"writing" : "reading",
- this_count, blk_rq_sectors(rq)));
+ this_count, blk_rq_sectors(rq)));
SCpnt->cmnd[1] = 0;
block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
@@ -810,8 +809,8 @@ static void get_sectorsize(struct scsi_cd *cd)
case 512:
break;
default:
- printk("%s: unsupported sector size %d.\n",
- cd->cdi.name, sector_size);
+ sr_printk(KERN_INFO, cd,
+ "unsupported sector size %d.", sector_size);
cd->capacity = 0;
}
@@ -853,7 +852,7 @@ static void get_capabilities(struct scsi_cd *cd)
/* allocate transfer buffer */
buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer) {
- printk(KERN_ERR "sr: out of memory.\n");
+ sr_printk(KERN_ERR, cd, "out of memory.\n");
return;
}
@@ -872,7 +871,7 @@ static void get_capabilities(struct scsi_cd *cd)
CDC_SELECT_DISC | CDC_SELECT_SPEED |
CDC_MRW | CDC_MRW_W | CDC_RAM);
kfree(buffer);
- printk("%s: scsi-1 drive\n", cd->cdi.name);
+ sr_printk(KERN_INFO, cd, "scsi-1 drive");
return;
}
@@ -881,22 +880,23 @@ static void get_capabilities(struct scsi_cd *cd)
cd->readcd_known = 1;
cd->readcd_cdda = buffer[n + 5] & 0x01;
/* print some capability bits */
- printk("%s: scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n", cd->cdi.name,
- ((buffer[n + 14] << 8) + buffer[n + 15]) / 176,
- cd->cdi.speed,
- buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */
- buffer[n + 3] & 0x20 ? "dvd-ram " : "",
- buffer[n + 2] & 0x02 ? "cd/rw " : "", /* can read rewriteable */
- buffer[n + 4] & 0x20 ? "xa/form2 " : "", /* can read xa/from2 */
- buffer[n + 5] & 0x01 ? "cdda " : "", /* can read audio data */
- loadmech[buffer[n + 6] >> 5]);
+ sr_printk(KERN_INFO, cd,
+ "scsi3-mmc drive: %dx/%dx %s%s%s%s%s%s\n",
+ ((buffer[n + 14] << 8) + buffer[n + 15]) / 176,
+ cd->cdi.speed,
+ buffer[n + 3] & 0x01 ? "writer " : "", /* CD Writer */
+ buffer[n + 3] & 0x20 ? "dvd-ram " : "",
+ buffer[n + 2] & 0x02 ? "cd/rw " : "", /* can read rewriteable */
+ buffer[n + 4] & 0x20 ? "xa/form2 " : "", /* can read xa/from2 */
+ buffer[n + 5] & 0x01 ? "cdda " : "", /* can read audio data */
+ loadmech[buffer[n + 6] >> 5]);
if ((buffer[n + 6] >> 5) == 0)
/* caddy drives can't close tray... */
cd->cdi.mask |= CDC_CLOSE_TRAY;
if ((buffer[n + 2] & 0x8) == 0)
/* not a DVD drive */
cd->cdi.mask |= CDC_DVD;
- if ((buffer[n + 3] & 0x20) == 0)
+ if ((buffer[n + 3] & 0x20) == 0)
/* can't write DVD-RAM media */
cd->cdi.mask |= CDC_DVD_RAM;
if ((buffer[n + 3] & 0x10) == 0)
@@ -927,7 +927,7 @@ static void get_capabilities(struct scsi_cd *cd)
*/
if ((cd->cdi.mask & (CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) !=
(CDC_DVD_RAM | CDC_MRW_W | CDC_RAM | CDC_CD_RW)) {
- cd->device->writeable = 1;
+ cd->writeable = 1;
}
kfree(buffer);
@@ -935,7 +935,7 @@ static void get_capabilities(struct scsi_cd *cd)
/*
* sr_packet() is the entry point for the generic commands generated
- * by the Uniform CD-ROM layer.
+ * by the Uniform CD-ROM layer.
*/
static int sr_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
diff --git a/drivers/scsi/sr.h b/drivers/scsi/sr.h
index 37c8f6b17510..1d1f6f416c59 100644
--- a/drivers/scsi/sr.h
+++ b/drivers/scsi/sr.h
@@ -36,6 +36,7 @@ typedef struct scsi_cd {
struct scsi_device *device;
unsigned int vendor; /* vendor code, see sr_vendor.c */
unsigned long ms_offset; /* for reading multisession-CD's */
+ unsigned writeable : 1;
unsigned use:1; /* is this device still supportable */
unsigned xa_flag:1; /* CD has XA sectors ? */
unsigned readcd_known:1; /* drive supports READ_CD (0xbe) */
@@ -55,6 +56,10 @@ typedef struct scsi_cd {
struct gendisk *disk;
} Scsi_CD;
+#define sr_printk(prefix, cd, fmt, a...) \
+ sdev_printk(prefix, (cd)->device, "[%s] " fmt, \
+ (cd)->cdi.name, ##a)
+
int sr_do_ioctl(Scsi_CD *, struct packet_command *);
int sr_lock_door(struct cdrom_device_info *, int);
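
The sr.h hunk above adds sr_printk(), which funnels the driver's former bare printk() calls through sdev_printk() with the cdrom name prefixed automatically. A small stand-alone model of the same macro shape — the struct, the "<6>" level string and the message are illustrative only, and like the kernel macro it relies on the GNU variadic-macro extension:

	#include <stdio.h>

	struct mock_cd {
		const char *name;	/* stands in for cd->cdi.name */
	};

	/* Same shape as sr_printk(): prepend a level and a per-device tag,
	 * then hand the caller's format string and arguments through. */
	#define cd_printk(level, cd, fmt, ...) \
		printf("%s[%s] " fmt, level, (cd)->name, ##__VA_ARGS__)

	int main(void)
	{
		struct mock_cd cd = { .name = "sr0" };

		/* mirrors a converted call such as the one in get_sectorsize() */
		cd_printk("<6>", &cd, "unsupported sector size %d.\n", 4096);
		return 0;
	}

The gain in the sr.c hunks above and the sr_ioctl.c/sr_vendor.c hunks below is uniformity: every message now carries the device identity without each call site repeating cd->cdi.name.
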
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index a3911c39ea50..6389fcff12ec 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -36,7 +36,6 @@ module_param(xa_test, int, S_IRUGO | S_IWUSR);
* the status of the unchecked_isa_dma flag in the host structure */
#define SR_GFP_DMA(cd) (((cd)->device->host->unchecked_isa_dma) ? GFP_DMA : 0)
-
static int sr_read_tochdr(struct cdrom_device_info *cdi,
struct cdrom_tochdr *tochdr)
{
@@ -219,7 +218,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
case UNIT_ATTENTION:
SDev->changed = 1;
if (!cgc->quiet)
- printk(KERN_INFO "%s: disc change detected.\n", cd->cdi.name);
+ sr_printk(KERN_INFO, cd,
+ "disc change detected.\n");
if (retries++ < 10)
goto retry;
err = -ENOMEDIUM;
@@ -229,7 +229,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
sshdr.ascq == 0x01) {
/* sense: Logical unit is in process of becoming ready */
if (!cgc->quiet)
- printk(KERN_INFO "%s: CDROM not ready yet.\n", cd->cdi.name);
+ sr_printk(KERN_INFO, cd,
+ "CDROM not ready yet.\n");
if (retries++ < 10) {
/* sleep 2 sec and try again */
ssleep(2);
@@ -241,7 +242,9 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
}
}
if (!cgc->quiet)
- printk(KERN_INFO "%s: CDROM not ready. Make sure there is a disc in the drive.\n", cd->cdi.name);
+ sr_printk(KERN_INFO, cd,
+ "CDROM not ready. Make sure there "
+ "is a disc in the drive.\n");
#ifdef DEBUG
scsi_print_sense_hdr("sr", &sshdr);
#endif
@@ -259,7 +262,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
#endif
break;
default:
- printk(KERN_ERR "%s: CDROM (ioctl) error, command: ", cd->cdi.name);
+ sr_printk(KERN_ERR, cd,
+ "CDROM (ioctl) error, command: ");
__scsi_print_command(cgc->cmd);
scsi_print_sense_hdr("sr", &sshdr);
err = -EIO;
@@ -491,8 +495,8 @@ static int sr_read_cd(Scsi_CD *cd, unsigned char *dest, int lba, int format, int
struct packet_command cgc;
#ifdef DEBUG
- printk("%s: sr_read_cd lba=%d format=%d blksize=%d\n",
- cd->cdi.name, lba, format, blksize);
+ sr_printk(KERN_INFO, cd, "sr_read_cd lba=%d format=%d blksize=%d\n",
+ lba, format, blksize);
#endif
memset(&cgc, 0, sizeof(struct packet_command));
@@ -539,7 +543,8 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
if (-EDRIVE_CANT_DO_THIS != rc)
return rc;
cd->readcd_known = 0;
- printk("CDROM does'nt support READ CD (0xbe) command\n");
+ sr_printk(KERN_INFO, cd,
+ "CDROM does'nt support READ CD (0xbe) command\n");
/* fall & retry the other way */
}
/* ... if this fails, we switch the blocksize using MODE SELECT */
@@ -548,7 +553,8 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
return rc;
}
#ifdef DEBUG
- printk("%s: sr_read_sector lba=%d blksize=%d\n", cd->cdi.name, lba, blksize);
+ sr_printk(KERN_INFO, cd, "sr_read_sector lba=%d blksize=%d\n",
+ lba, blksize);
#endif
memset(&cgc, 0, sizeof(struct packet_command));
@@ -592,7 +598,7 @@ int sr_is_xa(Scsi_CD *cd)
}
kfree(raw_sector);
#ifdef DEBUG
- printk("%s: sr_is_xa: %d\n", cd->cdi.name, is_xa);
+ sr_printk(KERN_INFO, cd, "sr_is_xa: %d\n", is_xa);
#endif
return is_xa;
}
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 92cc2efb25d7..11a238cb2222 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -123,7 +123,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
return -ENOMEM;
#ifdef DEBUG
- printk("%s: MODE SELECT 0x%x/%d\n", cd->cdi.name, density, blocklength);
+ sr_printk(KERN_INFO, cd, "MODE SELECT 0x%x/%d\n", density, blocklength);
#endif
memset(&cgc, 0, sizeof(struct packet_command));
cgc.cmd[0] = MODE_SELECT;
@@ -144,8 +144,9 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
}
#ifdef DEBUG
else
- printk("%s: switching blocklength to %d bytes failed\n",
- cd->cdi.name, blocklength);
+ sr_printk(KERN_INFO, cd,
+ "switching blocklength to %d bytes failed\n",
+ blocklength);
#endif
kfree(buffer);
return rc;
@@ -190,8 +191,8 @@ int sr_cd_check(struct cdrom_device_info *cdi)
if (rc != 0)
break;
if ((buffer[0] << 8) + buffer[1] < 0x0a) {
- printk(KERN_INFO "%s: Hmm, seems the drive "
- "doesn't support multisession CD's\n", cd->cdi.name);
+ sr_printk(KERN_INFO, cd, "Hmm, seems the drive "
+ "doesn't support multisession CD's\n");
no_multi = 1;
break;
}
@@ -218,9 +219,9 @@ int sr_cd_check(struct cdrom_device_info *cdi)
if (rc != 0)
break;
if (buffer[14] != 0 && buffer[14] != 0xb0) {
- printk(KERN_INFO "%s: Hmm, seems the cdrom "
- "doesn't support multisession CD's\n",
- cd->cdi.name);
+ sr_printk(KERN_INFO, cd, "Hmm, seems the cdrom "
+ "doesn't support multisession CD's\n");
+
no_multi = 1;
break;
}
@@ -245,9 +246,8 @@ int sr_cd_check(struct cdrom_device_info *cdi)
cgc.timeout = VENDOR_TIMEOUT;
rc = sr_do_ioctl(cd, &cgc);
if (rc == -EINVAL) {
- printk(KERN_INFO "%s: Hmm, seems the drive "
- "doesn't support multisession CD's\n",
- cd->cdi.name);
+ sr_printk(KERN_INFO, cd, "Hmm, seems the drive "
+ "doesn't support multisession CD's\n");
no_multi = 1;
break;
}
@@ -277,8 +277,8 @@ int sr_cd_check(struct cdrom_device_info *cdi)
break;
}
if ((rc = buffer[2]) == 0) {
- printk(KERN_WARNING
- "%s: No finished session\n", cd->cdi.name);
+ sr_printk(KERN_WARNING, cd,
+ "No finished session\n");
break;
}
cgc.cmd[0] = READ_TOC; /* Read TOC */
@@ -301,9 +301,9 @@ int sr_cd_check(struct cdrom_device_info *cdi)
default:
/* should not happen */
- printk(KERN_WARNING
- "%s: unknown vendor code (%i), not initialized ?\n",
- cd->cdi.name, cd->vendor);
+ sr_printk(KERN_WARNING, cd,
+ "unknown vendor code (%i), not initialized ?\n",
+ cd->vendor);
sector = 0;
no_multi = 1;
break;
@@ -321,8 +321,8 @@ int sr_cd_check(struct cdrom_device_info *cdi)
#ifdef DEBUG
if (sector)
- printk(KERN_DEBUG "%s: multisession offset=%lu\n",
- cd->cdi.name, sector);
+ sr_printk(KERN_DEBUG, cd, "multisession offset=%lu\n",
+ sector);
#endif
kfree(buffer);
return rc;
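
The st.c diff that follows collapses the scattered DEB(printk(ST_DEB_MSG ...)) blocks into st_printk() plus a DEBC_printk() helper: output stays gated on the runtime debugging flag and the call disappears entirely when debug support is not built in. A stand-alone sketch of that two-level gating (names and the messages are illustrative, not the driver's):

	#include <stdarg.h>
	#include <stdio.h>

	#define DEBUG 1			/* build-time switch */

	static int debugging = 1;	/* run-time switch, like st's flag */

	static void tape_msg(const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		vfprintf(stderr, fmt, ap);
		va_end(ap);
	}

	#if DEBUG
	#define debc_msg(fmt, ...) \
		do { if (debugging) tape_msg("st0: " fmt, ##__VA_ARGS__); } while (0)
	#else
	#define debc_msg(fmt, ...) do { } while (0)
	#endif

	int main(void)
	{
		debc_msg("Stepping over filemark %s.\n", "forward");
		debugging = 0;
		debc_msg("suppressed at run time\n");
		return 0;
	}

The do { } while (0) wrapper here is the usual way to keep a statement-like macro safe inside unbraced if/else bodies; the driver's own DEBC_printk() in the hunk below uses a bare if (debugging) block instead.
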
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 14eb4b256a03..aff9689de0f7 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -58,11 +58,11 @@ static const char *verstr = "20101219";
is defined and non-zero. */
#define DEBUG 0
+#define ST_DEB_MSG KERN_NOTICE
#if DEBUG
/* The message level for the debug messages is currently set to KERN_NOTICE
so that people can easily see the messages. Later when the debugging messages
in the drivers are more widely classified, this may be changed to KERN_DEBUG. */
-#define ST_DEB_MSG KERN_NOTICE
#define DEB(a) a
#define DEBC(a) if (debugging) { a ; }
#else
@@ -305,6 +305,15 @@ static inline char *tape_name(struct scsi_tape *tape)
return tape->disk->disk_name;
}
+#define st_printk(prefix, t, fmt, a...) \
+ sdev_printk(prefix, (t)->device, "%s: " fmt, \
+ tape_name(t), ##a)
+#ifdef DEBUG
+#define DEBC_printk(t, fmt, a...) \
+ if (debugging) { st_printk(ST_DEB_MSG, t, fmt, ##a ); }
+#else
+#define DEBC_printk(t, fmt, a...)
+#endif
static void st_analyze_sense(struct st_request *SRpnt, struct st_cmdstatus *s)
{
@@ -358,21 +367,20 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
else
scode = 0;
- DEB(
- if (debugging) {
- printk(ST_DEB_MSG "%s: Error: %x, cmd: %x %x %x %x %x %x\n",
- name, result,
- SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
- SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
+ DEB(
+ if (debugging) {
+ st_printk(ST_DEB_MSG, STp,
+ "Error: %x, cmd: %x %x %x %x %x %x\n", result,
+ SRpnt->cmd[0], SRpnt->cmd[1], SRpnt->cmd[2],
+ SRpnt->cmd[3], SRpnt->cmd[4], SRpnt->cmd[5]);
if (cmdstatp->have_sense)
__scsi_print_sense(name, SRpnt->sense, SCSI_SENSE_BUFFERSIZE);
} ) /* end DEB */
if (!debugging) { /* Abnormal conditions for tape */
if (!cmdstatp->have_sense)
- printk(KERN_WARNING
- "%s: Error %x (driver bt 0x%x, host bt 0x%x).\n",
- name, result, driver_byte(result),
- host_byte(result));
+ st_printk(KERN_WARNING, STp,
+ "Error %x (driver bt 0x%x, host bt 0x%x).\n",
+ result, driver_byte(result), host_byte(result));
else if (cmdstatp->have_sense &&
scode != NO_SENSE &&
scode != RECOVERED_ERROR &&
@@ -411,7 +419,7 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
STp->recover_count++;
STp->recover_reg++;
- DEB(
+ DEB(
if (debugging) {
if (SRpnt->cmd[0] == READ_6)
stp = "read";
@@ -419,8 +427,9 @@ static int st_chk_result(struct scsi_tape *STp, struct st_request * SRpnt)
stp = "write";
else
stp = "ioctl";
- printk(ST_DEB_MSG "%s: Recovered %s error (%d).\n", name, stp,
- STp->recover_count);
+ st_printk(ST_DEB_MSG, STp,
+ "Recovered %s error (%d).\n",
+ stp, STp->recover_count);
} ) /* end DEB */
if (cmdstatp->flags == 0)
@@ -437,8 +446,8 @@ static struct st_request *st_allocate_request(struct scsi_tape *stp)
if (streq)
streq->stp = stp;
else {
- DEBC(printk(KERN_ERR "%s: Can't get SCSI request.\n",
- tape_name(stp)););
+ st_printk(KERN_ERR, stp,
+ "Can't get SCSI request.\n");
if (signal_pending(current))
stp->buffer->syscall_result = -EINTR;
else
@@ -525,8 +534,8 @@ st_do_scsi(struct st_request * SRpnt, struct scsi_tape * STp, unsigned char *cmd
/* if async, make sure there's no command outstanding */
if (!do_wait && ((STp->buffer)->last_SRpnt)) {
- printk(KERN_ERR "%s: Async command already active.\n",
- tape_name(STp));
+ st_printk(KERN_ERR, STp,
+ "Async command already active.\n");
if (signal_pending(current))
(STp->buffer)->syscall_result = (-EINTR);
else
@@ -597,12 +606,12 @@ static int write_behind_check(struct scsi_tape * STp)
if (!STbuffer->writing)
return 0;
- DEB(
+ DEB(
if (STp->write_pending)
STp->nbr_waits++;
else
STp->nbr_finished++;
- ) /* end DEB */
+ ) /* end DEB */
wait_for_completion(&(STp->wait));
SRpnt = STbuffer->last_SRpnt;
@@ -639,8 +648,9 @@ static int write_behind_check(struct scsi_tape * STp)
STbuffer->writing = 0;
DEB(if (debugging && retval)
- printk(ST_DEB_MSG "%s: Async write error %x, return value %d.\n",
- tape_name(STp), STbuffer->cmdstat.midlevel_result, retval);) /* end DEB */
+ st_printk(ST_DEB_MSG, STp,
+ "Async write error %x, return value %d.\n",
+ STbuffer->cmdstat.midlevel_result, retval);) /* end DEB */
return retval;
}
@@ -662,8 +672,8 @@ static int cross_eof(struct scsi_tape * STp, int forward)
cmd[2] = cmd[3] = cmd[4] = 0xff; /* -1 filemarks */
cmd[5] = 0;
- DEBC(printk(ST_DEB_MSG "%s: Stepping over filemark %s.\n",
- tape_name(STp), forward ? "forward" : "backward"));
+ DEBC_printk(STp, "Stepping over filemark %s.\n",
+ forward ? "forward" : "backward");
SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
STp->device->request_queue->rq_timeout,
@@ -675,8 +685,9 @@ static int cross_eof(struct scsi_tape * STp, int forward)
SRpnt = NULL;
if ((STp->buffer)->cmdstat.midlevel_result != 0)
- printk(KERN_ERR "%s: Stepping over filemark %s failed.\n",
- tape_name(STp), forward ? "forward" : "backward");
+ st_printk(KERN_ERR, STp,
+ "Stepping over filemark %s failed.\n",
+ forward ? "forward" : "backward");
return (STp->buffer)->syscall_result;
}
@@ -699,8 +710,7 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
if (STp->dirty == 1) {
transfer = STp->buffer->buffer_bytes;
- DEBC(printk(ST_DEB_MSG "%s: Flushing %d bytes.\n",
- tape_name(STp), transfer));
+ DEBC_printk(STp, "Flushing %d bytes.\n", transfer);
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = WRITE_6;
@@ -732,8 +742,7 @@ static int st_flush_write_buffer(struct scsi_tape * STp)
STps->drv_block += blks;
result = (-ENOSPC);
} else {
- printk(KERN_ERR "%s: Error on flush.\n",
- tape_name(STp));
+ st_printk(KERN_ERR, STp, "Error on flush.\n");
STps->drv_block = (-1);
result = (-EIO);
}
@@ -811,7 +820,6 @@ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm)
{
int set_it = 0;
unsigned long arg;
- char *name = tape_name(STp);
if (!STp->density_changed &&
STm->default_density >= 0 &&
@@ -830,9 +838,10 @@ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm)
arg |= STp->block_size;
if (set_it &&
st_int_ioctl(STp, SET_DENS_AND_BLK, arg)) {
- printk(KERN_WARNING
- "%s: Can't set default block size to %d bytes and density %x.\n",
- name, STm->default_blksize, STm->default_density);
+ st_printk(KERN_WARNING, STp,
+ "Can't set default block size to %d bytes "
+ "and density %x.\n",
+ STm->default_blksize, STm->default_density);
if (modes_defined)
return (-EINVAL);
}
@@ -844,12 +853,9 @@ static int set_mode_densblk(struct scsi_tape * STp, struct st_modedef * STm)
static int do_door_lock(struct scsi_tape * STp, int do_lock)
{
int retval, cmd;
- DEB(char *name = tape_name(STp);)
-
cmd = do_lock ? SCSI_IOCTL_DOORLOCK : SCSI_IOCTL_DOORUNLOCK;
- DEBC(printk(ST_DEB_MSG "%s: %socking drive door.\n", name,
- do_lock ? "L" : "Unl"));
+ DEBC_printk(STp, "%socking drive door.\n", do_lock ? "L" : "Unl");
retval = scsi_ioctl(STp->device, cmd, NULL);
if (!retval) {
STp->door_locked = do_lock ? ST_LOCKED_EXPLICIT : ST_UNLOCKED;
@@ -976,15 +982,14 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
struct st_request *SRpnt = NULL;
struct st_modedef *STm;
struct st_partstat *STps;
- char *name = tape_name(STp);
struct inode *inode = file_inode(filp);
int mode = TAPE_MODE(inode);
STp->ready = ST_READY;
if (mode != STp->current_mode) {
- DEBC(printk(ST_DEB_MSG "%s: Mode change from %d to %d.\n",
- name, STp->current_mode, mode));
+ DEBC_printk(STp, "Mode change from %d to %d.\n",
+ STp->current_mode, mode);
new_session = 1;
STp->current_mode = mode;
}
@@ -1055,13 +1060,12 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
STp->min_block = ((STp->buffer)->b_data[4] << 8) |
(STp->buffer)->b_data[5];
if ( DEB( debugging || ) !STp->inited)
- printk(KERN_INFO
- "%s: Block limits %d - %d bytes.\n", name,
- STp->min_block, STp->max_block);
+ st_printk(KERN_INFO, STp,
+ "Block limits %d - %d bytes.\n",
+ STp->min_block, STp->max_block);
} else {
STp->min_block = STp->max_block = (-1);
- DEBC(printk(ST_DEB_MSG "%s: Can't read block limits.\n",
- name));
+ DEBC_printk(STp, "Can't read block limits.\n");
}
}
@@ -1078,56 +1082,58 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
}
if ((STp->buffer)->syscall_result != 0) {
- DEBC(printk(ST_DEB_MSG "%s: No Mode Sense.\n", name));
+ DEBC_printk(STp, "No Mode Sense.\n");
STp->block_size = ST_DEFAULT_BLOCK; /* Educated guess (?) */
(STp->buffer)->syscall_result = 0; /* Prevent error propagation */
STp->drv_write_prot = 0;
} else {
- DEBC(printk(ST_DEB_MSG
- "%s: Mode sense. Length %d, medium %x, WBS %x, BLL %d\n",
- name,
- (STp->buffer)->b_data[0], (STp->buffer)->b_data[1],
- (STp->buffer)->b_data[2], (STp->buffer)->b_data[3]));
+ DEBC_printk(STp,"Mode sense. Length %d, "
+ "medium %x, WBS %x, BLL %d\n",
+ (STp->buffer)->b_data[0],
+ (STp->buffer)->b_data[1],
+ (STp->buffer)->b_data[2],
+ (STp->buffer)->b_data[3]);
if ((STp->buffer)->b_data[3] >= 8) {
STp->drv_buffer = ((STp->buffer)->b_data[2] >> 4) & 7;
STp->density = (STp->buffer)->b_data[4];
STp->block_size = (STp->buffer)->b_data[9] * 65536 +
(STp->buffer)->b_data[10] * 256 + (STp->buffer)->b_data[11];
- DEBC(printk(ST_DEB_MSG
- "%s: Density %x, tape length: %x, drv buffer: %d\n",
- name, STp->density, (STp->buffer)->b_data[5] * 65536 +
- (STp->buffer)->b_data[6] * 256 + (STp->buffer)->b_data[7],
- STp->drv_buffer));
+ DEBC_printk(STp, "Density %x, tape length: %x, "
+ "drv buffer: %d\n",
+ STp->density,
+ (STp->buffer)->b_data[5] * 65536 +
+ (STp->buffer)->b_data[6] * 256 +
+ (STp->buffer)->b_data[7],
+ STp->drv_buffer);
}
STp->drv_write_prot = ((STp->buffer)->b_data[2] & 0x80) != 0;
if (!STp->drv_buffer && STp->immediate_filemark) {
- printk(KERN_WARNING
- "%s: non-buffered tape: disabling writing immediate filemarks\n",
- name);
+ st_printk(KERN_WARNING, STp,
+ "non-buffered tape: disabling "
+ "writing immediate filemarks\n");
STp->immediate_filemark = 0;
}
}
st_release_request(SRpnt);
SRpnt = NULL;
- STp->inited = 1;
+ STp->inited = 1;
if (STp->block_size > 0)
(STp->buffer)->buffer_blocks =
- (STp->buffer)->buffer_size / STp->block_size;
+ (STp->buffer)->buffer_size / STp->block_size;
else
(STp->buffer)->buffer_blocks = 1;
(STp->buffer)->buffer_bytes = (STp->buffer)->read_pointer = 0;
- DEBC(printk(ST_DEB_MSG
- "%s: Block size: %d, buffer size: %d (%d blocks).\n", name,
- STp->block_size, (STp->buffer)->buffer_size,
- (STp->buffer)->buffer_blocks));
+ DEBC_printk(STp, "Block size: %d, buffer size: %d (%d blocks).\n",
+ STp->block_size, (STp->buffer)->buffer_size,
+ (STp->buffer)->buffer_blocks);
if (STp->drv_write_prot) {
STp->write_prot = 1;
- DEBC(printk(ST_DEB_MSG "%s: Write protected\n", name));
+ DEBC_printk(STp, "Write protected\n");
if (do_wait &&
((st_flags & O_ACCMODE) == O_WRONLY ||
@@ -1141,8 +1147,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
/* This code is reached when the device is opened for the first time
after the driver has been initialized with tape in the drive and the
partition support has been enabled. */
- DEBC(printk(ST_DEB_MSG
- "%s: Updating partition number in status.\n", name));
+ DEBC_printk(STp, "Updating partition number in status.\n");
if ((STp->partition = find_partition(STp)) < 0) {
retval = STp->partition;
goto err_out;
@@ -1160,9 +1165,10 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
if (STp->default_drvbuffer != 0xff) {
if (st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer))
- printk(KERN_WARNING
- "%s: Can't set default drive buffering to %d.\n",
- name, STp->default_drvbuffer);
+ st_printk(KERN_WARNING, STp,
+ "Can't set default drive "
+ "buffering to %d.\n",
+ STp->default_drvbuffer);
}
}
@@ -1182,7 +1188,6 @@ static int st_open(struct inode *inode, struct file *filp)
struct scsi_tape *STp;
struct st_partstat *STps;
int dev = TAPE_NR(inode);
- char *name;
/*
* We really want to do nonseekable_open(inode, filp); here, but some
@@ -1196,13 +1201,12 @@ static int st_open(struct inode *inode, struct file *filp)
}
filp->private_data = STp;
- name = tape_name(STp);
spin_lock(&st_use_lock);
if (STp->in_use) {
spin_unlock(&st_use_lock);
scsi_tape_put(STp);
- DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); )
+ DEBC_printk(STp, "Device already in use.\n");
return (-EBUSY);
}
@@ -1222,8 +1226,8 @@ static int st_open(struct inode *inode, struct file *filp)
/* See that we have at least a one page buffer available */
if (!enlarge_buffer(STp->buffer, PAGE_SIZE, STp->restr_dma)) {
- printk(KERN_WARNING "%s: Can't allocate one page tape buffer.\n",
- name);
+ st_printk(KERN_WARNING, STp,
+ "Can't allocate one page tape buffer.\n");
retval = (-EOVERFLOW);
goto err_out;
}
@@ -1279,7 +1283,6 @@ static int st_flush(struct file *filp, fl_owner_t id)
struct scsi_tape *STp = filp->private_data;
struct st_modedef *STm = &(STp->modes[STp->current_mode]);
struct st_partstat *STps = &(STp->ps[STp->partition]);
- char *name = tape_name(STp);
if (file_count(filp) > 1)
return 0;
@@ -1292,24 +1295,25 @@ static int st_flush(struct file *filp, fl_owner_t id)
if (STp->can_partitions &&
(result2 = switch_partition(STp)) < 0) {
- DEBC(printk(ST_DEB_MSG
- "%s: switch_partition at close failed.\n", name));
+ DEBC_printk(STp, "switch_partition at close failed.\n");
if (result == 0)
result = result2;
goto out;
}
DEBC( if (STp->nbr_requests)
- printk(KERN_DEBUG "%s: Number of r/w requests %d, dio used in %d, pages %d.\n",
- name, STp->nbr_requests, STp->nbr_dio, STp->nbr_pages));
+ st_printk(KERN_DEBUG, STp,
+ "Number of r/w requests %d, dio used in %d, "
+ "pages %d.\n", STp->nbr_requests, STp->nbr_dio,
+ STp->nbr_pages));
if (STps->rw == ST_WRITING && !STp->pos_unknown) {
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
- DEBC(printk(ST_DEB_MSG "%s: Async write waits %d, finished %d.\n",
- name, STp->nbr_waits, STp->nbr_finished);
- )
-
+#if DEBUG
+ DEBC_printk(STp, "Async write waits %d, finished %d.\n",
+ STp->nbr_waits, STp->nbr_finished);
+#endif
memset(cmd, 0, MAX_COMMAND_SIZE);
cmd[0] = WRITE_FILEMARKS;
if (STp->immediate_filemark)
@@ -1343,13 +1347,13 @@ static int st_flush(struct file *filp, fl_owner_t id)
else { /* Write error */
st_release_request(SRpnt);
SRpnt = NULL;
- printk(KERN_ERR "%s: Error on write filemark.\n", name);
+ st_printk(KERN_ERR, STp,
+ "Error on write filemark.\n");
if (result == 0)
result = (-EIO);
}
- DEBC(printk(ST_DEB_MSG "%s: Buffer flushed, %d EOF(s) written\n",
- name, cmd[4]));
+ DEBC_printk(STp, "Buffer flushed, %d EOF(s) written\n", cmd[4]);
} else if (!STp->rew_at_close) {
STps = &(STp->ps[STp->partition]);
if (!STm->sysv || STps->rw != ST_READING) {
@@ -1447,9 +1451,10 @@ static ssize_t rw_checks(struct scsi_tape *STp, struct file *filp, size_t count)
if (count == 0)
goto out;
- DEB(
+ DEB(
if (!STp->in_use) {
- printk(ST_DEB_MSG "%s: Incorrect device.\n", tape_name(STp));
+ st_printk(ST_DEB_MSG, STp,
+ "Incorrect device.\n");
retval = (-EIO);
goto out;
} ) /* end DEB */
@@ -1519,8 +1524,9 @@ static int setup_buffering(struct scsi_tape *STp, const char __user *buf,
if (bufsize > STbp->buffer_size &&
!enlarge_buffer(STbp, bufsize, STp->restr_dma)) {
- printk(KERN_WARNING "%s: Can't allocate %d byte tape buffer.\n",
- tape_name(STp), bufsize);
+ st_printk(KERN_WARNING, STp,
+ "Can't allocate %d byte tape buffer.\n",
+ bufsize);
retval = (-EOVERFLOW);
goto out;
}
@@ -1563,7 +1569,6 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
struct st_modedef *STm;
struct st_partstat *STps;
struct st_buffer *STbp;
- char *name = tape_name(STp);
if (mutex_lock_interruptible(&STp->lock))
return -ERESTARTSYS;
@@ -1574,8 +1579,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
/* Write must be integral number of blocks */
if (STp->block_size != 0 && (count % STp->block_size) != 0) {
- printk(KERN_WARNING "%s: Write not multiple of tape block size.\n",
- name);
+ st_printk(KERN_WARNING, STp,
+ "Write not multiple of tape block size.\n");
retval = (-EINVAL);
goto out;
}
@@ -1601,8 +1606,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (STm->default_compression != ST_DONT_TOUCH &&
!(STp->compression_changed)) {
if (st_compression(STp, (STm->default_compression == ST_YES))) {
- printk(KERN_WARNING "%s: Can't set default compression.\n",
- name);
+ st_printk(KERN_WARNING, STp,
+ "Can't set default compression.\n");
if (modes_defined) {
retval = (-EINVAL);
goto out;
@@ -1723,7 +1728,7 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (STbp->syscall_result != 0) {
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
- DEBC(printk(ST_DEB_MSG "%s: Error on write:\n", name));
+ DEBC_printk(STp, "Error on write:\n");
if (cmdstatp->have_sense && (cmdstatp->flags & SENSE_EOM)) {
scode = cmdstatp->sense_hdr.sense_key;
if (cmdstatp->remainder_valid)
@@ -1750,9 +1755,9 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
if (STp->block_size == 0 ||
undone > 0 || count == 0)
retval = (-ENOSPC); /* EOM within current request */
- DEBC(printk(ST_DEB_MSG
- "%s: EOM with %d bytes unwritten.\n",
- name, (int)count));
+ DEBC_printk(STp, "EOM with %d "
+ "bytes unwritten.\n",
+ (int)count);
} else {
/* EOT within data buffered earlier (possible only
in fixed block mode without direct i/o) */
@@ -1765,9 +1770,10 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
STp->block_size;
}
STps->eof = ST_EOM_OK;
- DEBC(printk(ST_DEB_MSG
- "%s: Retry write of %d bytes at EOM.\n",
- name, STp->buffer->buffer_bytes));
+ DEBC_printk(STp, "Retry "
+ "write of %d "
+ "bytes at EOM.\n",
+ STp->buffer->buffer_bytes);
goto retry_write;
}
else {
@@ -1778,9 +1784,8 @@ st_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
STps->eof = ST_EOM_ERROR;
STps->drv_block = (-1); /* Too cautious? */
retval = (-EIO); /* EOM for old data */
- DEBC(printk(ST_DEB_MSG
- "%s: EOM with lost data.\n",
- name));
+ DEBC_printk(STp, "EOM with "
+ "lost data.\n");
}
}
} else {
@@ -1839,7 +1844,6 @@ static long read_tape(struct scsi_tape *STp, long count,
struct st_partstat *STps;
struct st_buffer *STbp;
int retval = 0;
- char *name = tape_name(STp);
if (count == 0)
return 0;
@@ -1891,12 +1895,12 @@ static long read_tape(struct scsi_tape *STp, long count,
struct st_cmdstatus *cmdstatp = &STp->buffer->cmdstat;
retval = 1;
- DEBC(printk(ST_DEB_MSG "%s: Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n",
- name,
- SRpnt->sense[0], SRpnt->sense[1],
- SRpnt->sense[2], SRpnt->sense[3],
- SRpnt->sense[4], SRpnt->sense[5],
- SRpnt->sense[6], SRpnt->sense[7]));
+ DEBC_printk(STp,
+ "Sense: %2x %2x %2x %2x %2x %2x %2x %2x\n",
+ SRpnt->sense[0], SRpnt->sense[1],
+ SRpnt->sense[2], SRpnt->sense[3],
+ SRpnt->sense[4], SRpnt->sense[5],
+ SRpnt->sense[6], SRpnt->sense[7]);
if (cmdstatp->have_sense) {
if (cmdstatp->sense_hdr.sense_key == BLANK_CHECK)
@@ -1913,23 +1917,27 @@ static long read_tape(struct scsi_tape *STp, long count,
transfer = bytes;
if (cmdstatp->flags & SENSE_ILI) { /* ILI */
- if (STp->block_size == 0) {
- if (transfer <= 0) {
- if (transfer < 0)
- printk(KERN_NOTICE
- "%s: Failed to read %d byte block with %d byte transfer.\n",
- name, bytes - transfer, bytes);
- if (STps->drv_block >= 0)
- STps->drv_block += 1;
- STbp->buffer_bytes = 0;
- return (-ENOMEM);
- }
+ if (STp->block_size == 0 &&
+ transfer < 0) {
+ st_printk(KERN_NOTICE, STp,
+ "Failed to read %d "
+ "byte block with %d "
+ "byte transfer.\n",
+ bytes - transfer,
+ bytes);
+ if (STps->drv_block >= 0)
+ STps->drv_block += 1;
+ STbp->buffer_bytes = 0;
+ return (-ENOMEM);
+ } else if (STp->block_size == 0) {
STbp->buffer_bytes = bytes - transfer;
} else {
st_release_request(SRpnt);
SRpnt = *aSRpnt = NULL;
if (transfer == blks) { /* We did not get anything, error */
- printk(KERN_NOTICE "%s: Incorrect block size.\n", name);
+ st_printk(KERN_NOTICE, STp,
+ "Incorrect "
+ "block size.\n");
if (STps->drv_block >= 0)
STps->drv_block += blks - transfer + 1;
st_int_ioctl(STp, MTBSR, 1);
@@ -1938,9 +1946,11 @@ static long read_tape(struct scsi_tape *STp, long count,
/* We have some data, deliver it */
STbp->buffer_bytes = (blks - transfer) *
STp->block_size;
- DEBC(printk(ST_DEB_MSG
- "%s: ILI but enough data received %ld %d.\n",
- name, count, STbp->buffer_bytes));
+ DEBC_printk(STp, "ILI but "
+ "enough data "
+ "received %ld "
+ "%d.\n", count,
+ STbp->buffer_bytes);
if (STps->drv_block >= 0)
STps->drv_block += 1;
if (st_int_ioctl(STp, MTBSR, 1))
@@ -1956,9 +1966,9 @@ static long read_tape(struct scsi_tape *STp, long count,
else
STbp->buffer_bytes =
bytes - transfer * STp->block_size;
- DEBC(printk(ST_DEB_MSG
- "%s: EOF detected (%d bytes read).\n",
- name, STbp->buffer_bytes));
+ DEBC_printk(STp, "EOF detected (%d "
+ "bytes read).\n",
+ STbp->buffer_bytes);
} else if (cmdstatp->flags & SENSE_EOM) {
if (STps->eof == ST_FM)
STps->eof = ST_EOD_1;
@@ -1970,20 +1980,20 @@ static long read_tape(struct scsi_tape *STp, long count,
STbp->buffer_bytes =
bytes - transfer * STp->block_size;
- DEBC(printk(ST_DEB_MSG "%s: EOM detected (%d bytes read).\n",
- name, STbp->buffer_bytes));
+ DEBC_printk(STp, "EOM detected (%d "
+ "bytes read).\n",
+ STbp->buffer_bytes);
}
}
- /* end of EOF, EOM, ILI test */
+ /* end of EOF, EOM, ILI test */
else { /* nonzero sense key */
- DEBC(printk(ST_DEB_MSG
- "%s: Tape error while reading.\n", name));
+ DEBC_printk(STp, "Tape error while reading.\n");
STps->drv_block = (-1);
if (STps->eof == ST_FM &&
cmdstatp->sense_hdr.sense_key == BLANK_CHECK) {
- DEBC(printk(ST_DEB_MSG
- "%s: Zero returned for first BLANK CHECK after EOF.\n",
- name));
+ DEBC_printk(STp, "Zero returned for "
+ "first BLANK CHECK "
+ "after EOF.\n");
STps->eof = ST_EOD_2; /* First BLANK_CHECK after FM */
} else /* Some other extended sense code */
retval = (-EIO);
@@ -1992,13 +2002,13 @@ static long read_tape(struct scsi_tape *STp, long count,
if (STbp->buffer_bytes < 0) /* Caused by bogus sense data */
STbp->buffer_bytes = 0;
}
- /* End of extended sense test */
+ /* End of extended sense test */
else { /* Non-extended sense */
retval = STbp->syscall_result;
}
}
- /* End of error handling */
+ /* End of error handling */
else { /* Read successful */
STbp->buffer_bytes = bytes;
if (STp->sili) /* In fixed block mode residual is always zero here */
@@ -2028,7 +2038,6 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
struct st_modedef *STm;
struct st_partstat *STps;
struct st_buffer *STbp = STp->buffer;
- DEB( char *name = tape_name(STp); )
if (mutex_lock_interruptible(&STp->lock))
return -ERESTARTSYS;
@@ -2053,11 +2062,12 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
goto out;
STps->rw = ST_READING;
}
- DEB(
+ DEB(
if (debugging && STps->eof != ST_NOEOF)
- printk(ST_DEB_MSG "%s: EOF/EOM flag up (%d). Bytes %d\n", name,
- STps->eof, STbp->buffer_bytes);
- ) /* end DEB */
+ st_printk(ST_DEB_MSG, STp,
+ "EOF/EOM flag up (%d). Bytes %d\n",
+ STps->eof, STbp->buffer_bytes);
+ ) /* end DEB */
retval = setup_buffering(STp, buf, count, 1);
if (retval)
@@ -2104,13 +2114,13 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
/* Move the data from driver buffer to user buffer */
if (STbp->buffer_bytes > 0) {
- DEB(
+ DEB(
if (debugging && STps->eof != ST_NOEOF)
- printk(ST_DEB_MSG
- "%s: EOF up (%d). Left %d, needed %d.\n", name,
- STps->eof, STbp->buffer_bytes,
- (int)(count - total));
- ) /* end DEB */
+ st_printk(ST_DEB_MSG, STp,
+ "EOF up (%d). Left %d, needed %d.\n",
+ STps->eof, STbp->buffer_bytes,
+ (int)(count - total));
+ ) /* end DEB */
transfer = STbp->buffer_bytes < count - total ?
STbp->buffer_bytes : count - total;
if (!do_dio) {
@@ -2166,26 +2176,30 @@ st_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
DEB(
/* Set the driver options */
-static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm, char *name)
+static void st_log_options(struct scsi_tape * STp, struct st_modedef * STm)
{
if (debugging) {
- printk(KERN_INFO
- "%s: Mode %d options: buffer writes: %d, async writes: %d, read ahead: %d\n",
- name, STp->current_mode, STm->do_buffer_writes, STm->do_async_writes,
- STm->do_read_ahead);
- printk(KERN_INFO
- "%s: can bsr: %d, two FMs: %d, fast mteom: %d, auto lock: %d,\n",
- name, STp->can_bsr, STp->two_fm, STp->fast_mteom, STp->do_auto_lock);
- printk(KERN_INFO
- "%s: defs for wr: %d, no block limits: %d, partitions: %d, s2 log: %d\n",
- name, STm->defaults_for_writes, STp->omit_blklims, STp->can_partitions,
- STp->scsi2_logical);
- printk(KERN_INFO
- "%s: sysv: %d nowait: %d sili: %d nowait_filemark: %d\n",
- name, STm->sysv, STp->immediate, STp->sili,
- STp->immediate_filemark);
- printk(KERN_INFO "%s: debugging: %d\n",
- name, debugging);
+ st_printk(KERN_INFO, STp,
+ "Mode %d options: buffer writes: %d, "
+ "async writes: %d, read ahead: %d\n",
+ STp->current_mode, STm->do_buffer_writes,
+ STm->do_async_writes, STm->do_read_ahead);
+ st_printk(KERN_INFO, STp,
+ " can bsr: %d, two FMs: %d, "
+ "fast mteom: %d, auto lock: %d,\n",
+ STp->can_bsr, STp->two_fm, STp->fast_mteom,
+ STp->do_auto_lock);
+ st_printk(KERN_INFO, STp,
+ " defs for wr: %d, no block limits: %d, "
+ "partitions: %d, s2 log: %d\n",
+ STm->defaults_for_writes, STp->omit_blklims,
+ STp->can_partitions, STp->scsi2_logical);
+ st_printk(KERN_INFO, STp,
+ " sysv: %d nowait: %d sili: %d "
+ "nowait_filemark: %d\n",
+ STm->sysv, STp->immediate, STp->sili,
+ STp->immediate_filemark);
+ st_printk(KERN_INFO, STp, " debugging: %d\n", debugging);
}
}
)
@@ -2196,7 +2210,6 @@ static int st_set_options(struct scsi_tape *STp, long options)
int value;
long code;
struct st_modedef *STm;
- char *name = tape_name(STp);
struct cdev *cd0, *cd1;
struct device *d0, *d1;
@@ -2212,9 +2225,8 @@ static int st_set_options(struct scsi_tape *STp, long options)
STm->devs[0] = d0;
STm->devs[1] = d1;
modes_defined = 1;
- DEBC(printk(ST_DEB_MSG
- "%s: Initialized mode %d definition from mode 0\n",
- name, STp->current_mode));
+ DEBC_printk(STp, "Initialized mode %d definition from mode 0\n",
+ STp->current_mode);
}
code = options & MT_ST_OPTIONS;
@@ -2236,7 +2248,7 @@ static int st_set_options(struct scsi_tape *STp, long options)
STm->sysv = (options & MT_ST_SYSV) != 0;
STp->sili = (options & MT_ST_SILI) != 0;
DEB( debugging = (options & MT_ST_DEBUGGING) != 0;
- st_log_options(STp, STm, name); )
+ st_log_options(STp, STm); )
} else if (code == MT_ST_SETBOOLEANS || code == MT_ST_CLEARBOOLEANS) {
value = (code == MT_ST_SETBOOLEANS);
if ((options & MT_ST_BUFFER_WRITES) != 0)
@@ -2270,21 +2282,21 @@ static int st_set_options(struct scsi_tape *STp, long options)
STm->sysv = value;
if ((options & MT_ST_SILI) != 0)
STp->sili = value;
- DEB(
+ DEB(
if ((options & MT_ST_DEBUGGING) != 0)
debugging = value;
- st_log_options(STp, STm, name); )
+ st_log_options(STp, STm); )
} else if (code == MT_ST_WRITE_THRESHOLD) {
/* Retained for compatibility */
} else if (code == MT_ST_DEF_BLKSIZE) {
value = (options & ~MT_ST_OPTIONS);
if (value == ~MT_ST_OPTIONS) {
STm->default_blksize = (-1);
- DEBC( printk(KERN_INFO "%s: Default block size disabled.\n", name));
+ DEBC_printk(STp, "Default block size disabled.\n");
} else {
STm->default_blksize = value;
- DEBC( printk(KERN_INFO "%s: Default block size set to %d bytes.\n",
- name, STm->default_blksize));
+ DEBC_printk(STp,"Default block size set to "
+ "%d bytes.\n", STm->default_blksize);
if (STp->ready == ST_READY) {
STp->blksize_changed = 0;
set_mode_densblk(STp, STm);
@@ -2294,13 +2306,13 @@ static int st_set_options(struct scsi_tape *STp, long options)
value = (options & ~MT_ST_OPTIONS);
if ((value & MT_ST_SET_LONG_TIMEOUT) != 0) {
STp->long_timeout = (value & ~MT_ST_SET_LONG_TIMEOUT) * HZ;
- DEBC( printk(KERN_INFO "%s: Long timeout set to %d seconds.\n", name,
- (value & ~MT_ST_SET_LONG_TIMEOUT)));
+ DEBC_printk(STp, "Long timeout set to %d seconds.\n",
+ (value & ~MT_ST_SET_LONG_TIMEOUT));
} else {
blk_queue_rq_timeout(STp->device->request_queue,
value * HZ);
- DEBC( printk(KERN_INFO "%s: Normal timeout set to %d seconds.\n",
- name, value) );
+ DEBC_printk(STp, "Normal timeout set to %d seconds.\n",
+ value);
}
} else if (code == MT_ST_SET_CLN) {
value = (options & ~MT_ST_OPTIONS) & 0xff;
@@ -2311,21 +2323,21 @@ static int st_set_options(struct scsi_tape *STp, long options)
STp->cln_mode = value;
STp->cln_sense_mask = (options >> 8) & 0xff;
STp->cln_sense_value = (options >> 16) & 0xff;
- printk(KERN_INFO
- "%s: Cleaning request mode %d, mask %02x, value %02x\n",
- name, value, STp->cln_sense_mask, STp->cln_sense_value);
+ st_printk(KERN_INFO, STp,
+ "Cleaning request mode %d, mask %02x, value %02x\n",
+ value, STp->cln_sense_mask, STp->cln_sense_value);
} else if (code == MT_ST_DEF_OPTIONS) {
code = (options & ~MT_ST_CLEAR_DEFAULT);
value = (options & MT_ST_CLEAR_DEFAULT);
if (code == MT_ST_DEF_DENSITY) {
if (value == MT_ST_CLEAR_DEFAULT) {
STm->default_density = (-1);
- DEBC( printk(KERN_INFO "%s: Density default disabled.\n",
- name));
+ DEBC_printk(STp,
+ "Density default disabled.\n");
} else {
STm->default_density = value & 0xff;
- DEBC( printk(KERN_INFO "%s: Density default set to %x\n",
- name, STm->default_density));
+ DEBC_printk(STp, "Density default set to %x\n",
+ STm->default_density);
if (STp->ready == ST_READY) {
STp->density_changed = 0;
set_mode_densblk(STp, STm);
@@ -2334,31 +2346,33 @@ static int st_set_options(struct scsi_tape *STp, long options)
} else if (code == MT_ST_DEF_DRVBUFFER) {
if (value == MT_ST_CLEAR_DEFAULT) {
STp->default_drvbuffer = 0xff;
- DEBC( printk(KERN_INFO
- "%s: Drive buffer default disabled.\n", name));
+ DEBC_printk(STp,
+ "Drive buffer default disabled.\n");
} else {
STp->default_drvbuffer = value & 7;
- DEBC( printk(KERN_INFO
- "%s: Drive buffer default set to %x\n",
- name, STp->default_drvbuffer));
+ DEBC_printk(STp,
+ "Drive buffer default set to %x\n",
+ STp->default_drvbuffer);
if (STp->ready == ST_READY)
st_int_ioctl(STp, MTSETDRVBUFFER, STp->default_drvbuffer);
}
} else if (code == MT_ST_DEF_COMPRESSION) {
if (value == MT_ST_CLEAR_DEFAULT) {
STm->default_compression = ST_DONT_TOUCH;
- DEBC( printk(KERN_INFO
- "%s: Compression default disabled.\n", name));
+ DEBC_printk(STp,
+ "Compression default disabled.\n");
} else {
if ((value & 0xff00) != 0) {
STp->c_algo = (value & 0xff00) >> 8;
- DEBC( printk(KERN_INFO "%s: Compression algorithm set to 0x%x.\n",
- name, STp->c_algo));
+ DEBC_printk(STp, "Compression "
+ "algorithm set to 0x%x.\n",
+ STp->c_algo);
}
if ((value & 0xff) != 0xff) {
STm->default_compression = (value & 1 ? ST_YES : ST_NO);
- DEBC( printk(KERN_INFO "%s: Compression default set to %x\n",
- name, (value & 1)));
+ DEBC_printk(STp, "Compression default "
+ "set to %x\n",
+ (value & 1));
if (STp->ready == ST_READY) {
STp->compression_changed = 0;
st_compression(STp, (STm->default_compression == ST_YES));
@@ -2473,7 +2487,6 @@ static int st_compression(struct scsi_tape * STp, int state)
int retval;
int mpoffs; /* Offset to mode page start */
unsigned char *b_data = (STp->buffer)->b_data;
- DEB( char *name = tape_name(STp); )
if (STp->ready != ST_READY)
return (-EIO);
@@ -2481,18 +2494,17 @@ static int st_compression(struct scsi_tape * STp, int state)
/* Read the current page contents */
retval = read_mode_page(STp, COMPRESSION_PAGE, 0);
if (retval) {
- DEBC(printk(ST_DEB_MSG "%s: Compression mode page not supported.\n",
- name));
+ DEBC_printk(STp, "Compression mode page not supported.\n");
return (-EIO);
}
mpoffs = MODE_HEADER_LENGTH + b_data[MH_OFF_BDESCS_LENGTH];
- DEBC(printk(ST_DEB_MSG "%s: Compression state is %d.\n", name,
- (b_data[mpoffs + CP_OFF_DCE_DCC] & DCE_MASK ? 1 : 0)));
+ DEBC_printk(STp, "Compression state is %d.\n",
+ (b_data[mpoffs + CP_OFF_DCE_DCC] & DCE_MASK ? 1 : 0));
/* Check if compression can be changed */
if ((b_data[mpoffs + CP_OFF_DCE_DCC] & DCC_MASK) == 0) {
- DEBC(printk(ST_DEB_MSG "%s: Compression not supported.\n", name));
+ DEBC_printk(STp, "Compression not supported.\n");
return (-EIO);
}
@@ -2510,11 +2522,10 @@ static int st_compression(struct scsi_tape * STp, int state)
retval = write_mode_page(STp, COMPRESSION_PAGE, 0);
if (retval) {
- DEBC(printk(ST_DEB_MSG "%s: Compression change failed.\n", name));
+ DEBC_printk(STp, "Compression change failed.\n");
return (-EIO);
}
- DEBC(printk(ST_DEB_MSG "%s: Compression state changed to %d.\n",
- name, state));
+ DEBC_printk(STp, "Compression state changed to %d.\n", state);
STp->compression_changed = 1;
return 0;
@@ -2525,7 +2536,6 @@ static int st_compression(struct scsi_tape * STp, int state)
static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_code)
{
int retval = (-EIO), timeout;
- DEB( char *name = tape_name(STp); )
unsigned char cmd[MAX_COMMAND_SIZE];
struct st_partstat *STps;
struct st_request *SRpnt;
@@ -2546,9 +2556,9 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
*/
if (load_code >= 1 + MT_ST_HPLOADER_OFFSET
&& load_code <= 6 + MT_ST_HPLOADER_OFFSET) {
- DEBC(printk(ST_DEB_MSG "%s: Enhanced %sload slot %2d.\n",
- name, (cmd[4]) ? "" : "un",
- load_code - MT_ST_HPLOADER_OFFSET));
+ DEBC_printk(STp, " Enhanced %sload slot %2d.\n",
+ (cmd[4]) ? "" : "un",
+ load_code - MT_ST_HPLOADER_OFFSET);
cmd[3] = load_code - MT_ST_HPLOADER_OFFSET; /* MediaID field of C1553A */
}
if (STp->immediate) {
@@ -2560,9 +2570,9 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
DEBC(
if (!load_code)
- printk(ST_DEB_MSG "%s: Unloading tape.\n", name);
+ st_printk(ST_DEB_MSG, STp, "Unloading tape.\n");
else
- printk(ST_DEB_MSG "%s: Loading tape.\n", name);
+ st_printk(ST_DEB_MSG, STp, "Loading tape.\n");
);
SRpnt = st_do_scsi(NULL, STp, cmd, 0, DMA_NONE,
@@ -2597,17 +2607,24 @@ static int do_load_unload(struct scsi_tape *STp, struct file *filp, int load_cod
#if DEBUG
#define ST_DEB_FORWARD 0
#define ST_DEB_BACKWARD 1
-static void deb_space_print(char *name, int direction, char *units, unsigned char *cmd)
+static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd)
{
s32 sc;
+ if (!debugging)
+ return;
+
sc = cmd[2] & 0x80 ? 0xff000000 : 0;
sc |= (cmd[2] << 16) | (cmd[3] << 8) | cmd[4];
if (direction)
sc = -sc;
- printk(ST_DEB_MSG "%s: Spacing tape %s over %d %s.\n", name,
- direction ? "backward" : "forward", sc, units);
+ st_printk(ST_DEB_MSG, STp, "Spacing tape %s over %d %s.\n",
+ direction ? "backward" : "forward", sc, units);
}
+#else
+#define ST_DEB_FORWARD 0
+#define ST_DEB_BACKWARD 1
+static void deb_space_print(struct scsi_tape *STp, int direction, char *units, unsigned char *cmd) {}
#endif
@@ -2623,7 +2640,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
struct st_partstat *STps;
int fileno, blkno, at_sm, undone;
int datalen = 0, direction = DMA_NONE;
- char *name = tape_name(STp);
WARN_ON(STp->buffer->do_dio != 0);
if (STp->ready != ST_READY) {
@@ -2648,7 +2664,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[2] = (arg >> 16);
cmd[3] = (arg >> 8);
cmd[4] = arg;
- DEBC(deb_space_print(name, ST_DEB_FORWARD, "filemarks", cmd);)
+ deb_space_print(STp, ST_DEB_FORWARD, "filemarks", cmd);
if (fileno >= 0)
fileno += arg;
blkno = 0;
@@ -2663,7 +2679,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[2] = (ltmp >> 16);
cmd[3] = (ltmp >> 8);
cmd[4] = ltmp;
- DEBC(deb_space_print(name, ST_DEB_BACKWARD, "filemarks", cmd);)
+ deb_space_print(STp, ST_DEB_BACKWARD, "filemarks", cmd);
if (fileno >= 0)
fileno -= arg;
blkno = (-1); /* We can't know the block number */
@@ -2675,7 +2691,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[2] = (arg >> 16);
cmd[3] = (arg >> 8);
cmd[4] = arg;
- DEBC(deb_space_print(name, ST_DEB_FORWARD, "blocks", cmd);)
+ deb_space_print(STp, ST_DEB_FORWARD, "blocks", cmd);
if (blkno >= 0)
blkno += arg;
at_sm &= (arg == 0);
@@ -2687,7 +2703,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[2] = (ltmp >> 16);
cmd[3] = (ltmp >> 8);
cmd[4] = ltmp;
- DEBC(deb_space_print(name, ST_DEB_BACKWARD, "blocks", cmd);)
+ deb_space_print(STp, ST_DEB_BACKWARD, "blocks", cmd);
if (blkno >= 0)
blkno -= arg;
at_sm &= (arg == 0);
@@ -2698,7 +2714,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[2] = (arg >> 16);
cmd[3] = (arg >> 8);
cmd[4] = arg;
- DEBC(deb_space_print(name, ST_DEB_FORWARD, "setmarks", cmd);)
+ deb_space_print(STp, ST_DEB_FORWARD, "setmarks", cmd);
if (arg != 0) {
blkno = fileno = (-1);
at_sm = 1;
@@ -2711,7 +2727,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[2] = (ltmp >> 16);
cmd[3] = (ltmp >> 8);
cmd[4] = ltmp;
- DEBC(deb_space_print(name, ST_DEB_BACKWARD, "setmarks", cmd);)
+ deb_space_print(STp, ST_DEB_BACKWARD, "setmarks", cmd);
if (arg != 0) {
blkno = fileno = (-1);
at_sm = 1;
@@ -2732,13 +2748,19 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[3] = (arg >> 8);
cmd[4] = arg;
timeout = STp->device->request_queue->rq_timeout;
- DEBC(
- if (cmd_in != MTWSM)
- printk(ST_DEB_MSG "%s: Writing %d filemarks.\n", name,
- cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
- else
- printk(ST_DEB_MSG "%s: Writing %d setmarks.\n", name,
- cmd[2] * 65536 + cmd[3] * 256 + cmd[4]);
+ DEBC(
+ if (cmd_in != MTWSM)
+ st_printk(ST_DEB_MSG, STp,
+ "Writing %d filemarks.\n",
+ cmd[2] * 65536 +
+ cmd[3] * 256 +
+ cmd[4]);
+ else
+ st_printk(ST_DEB_MSG, STp,
+ "Writing %d setmarks.\n",
+ cmd[2] * 65536 +
+ cmd[3] * 256 +
+ cmd[4]);
)
if (fileno >= 0)
fileno += arg;
@@ -2751,11 +2773,11 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
cmd[1] = 1; /* Don't wait for completion */
timeout = STp->device->request_queue->rq_timeout;
}
- DEBC(printk(ST_DEB_MSG "%s: Rewinding tape.\n", name));
+ DEBC_printk(STp, "Rewinding tape.\n");
fileno = blkno = at_sm = 0;
break;
case MTNOP:
- DEBC(printk(ST_DEB_MSG "%s: No op on tape.\n", name));
+ DEBC_printk(STp, "No op on tape.\n");
return 0; /* Should do something ? */
break;
case MTRETEN:
@@ -2765,7 +2787,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
timeout = STp->device->request_queue->rq_timeout;
}
cmd[4] = 3;
- DEBC(printk(ST_DEB_MSG "%s: Retensioning tape.\n", name));
+ DEBC_printk(STp, "Retensioning tape.\n");
fileno = blkno = at_sm = 0;
break;
case MTEOM:
@@ -2783,8 +2805,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
fileno = (-1);
cmd[0] = SPACE;
cmd[1] = 3;
- DEBC(printk(ST_DEB_MSG "%s: Spacing to end of recorded medium.\n",
- name));
+ DEBC_printk(STp, "Spacing to end of recorded medium.\n");
blkno = -1;
at_sm = 0;
break;
@@ -2800,7 +2821,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
else
timeout = STp->long_timeout * 8;
- DEBC(printk(ST_DEB_MSG "%s: Erasing tape.\n", name));
+ DEBC_printk(STp, "Erasing tape.\n");
fileno = blkno = at_sm = 0;
break;
case MTSETBLK: /* Set block length */
@@ -2815,7 +2836,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
STp->max_block > 0 &&
((arg & MT_ST_BLKSIZE_MASK) < STp->min_block ||
(arg & MT_ST_BLKSIZE_MASK) > STp->max_block)) {
- printk(KERN_WARNING "%s: Illegal block size.\n", name);
+ st_printk(KERN_WARNING, STp, "Illegal block size.\n");
return (-EINVAL);
}
cmd[0] = MODE_SELECT;
@@ -2848,21 +2869,21 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
(STp->buffer)->b_data[10] = (ltmp >> 8);
(STp->buffer)->b_data[11] = ltmp;
timeout = STp->device->request_queue->rq_timeout;
- DEBC(
+ DEBC(
if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK)
- printk(ST_DEB_MSG
- "%s: Setting block size to %d bytes.\n", name,
- (STp->buffer)->b_data[9] * 65536 +
- (STp->buffer)->b_data[10] * 256 +
- (STp->buffer)->b_data[11]);
+ st_printk(ST_DEB_MSG, STp,
+ "Setting block size to %d bytes.\n",
+ (STp->buffer)->b_data[9] * 65536 +
+ (STp->buffer)->b_data[10] * 256 +
+ (STp->buffer)->b_data[11]);
if (cmd_in == MTSETDENSITY || cmd_in == SET_DENS_AND_BLK)
- printk(ST_DEB_MSG
- "%s: Setting density code to %x.\n", name,
- (STp->buffer)->b_data[4]);
+ st_printk(ST_DEB_MSG, STp,
+ "Setting density code to %x.\n",
+ (STp->buffer)->b_data[4]);
if (cmd_in == MTSETDRVBUFFER)
- printk(ST_DEB_MSG
- "%s: Setting drive buffer code to %d.\n", name,
- ((STp->buffer)->b_data[2] >> 4) & 7);
+ st_printk(ST_DEB_MSG, STp,
+ "Setting drive buffer code to %d.\n",
+ ((STp->buffer)->b_data[2] >> 4) & 7);
)
break;
default:
@@ -3019,7 +3040,6 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
int result;
unsigned char scmd[MAX_COMMAND_SIZE];
struct st_request *SRpnt;
- DEB( char *name = tape_name(STp); )
if (STp->ready != ST_READY)
return (-EIO);
@@ -3043,7 +3063,7 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
(STp->device->scsi_level >= SCSI_2 &&
((STp->buffer)->b_data[0] & 4) != 0)) {
*block = *partition = 0;
- DEBC(printk(ST_DEB_MSG "%s: Can't read tape position.\n", name));
+ DEBC_printk(STp, "Can't read tape position.\n");
result = (-EIO);
} else {
result = 0;
@@ -3062,8 +3082,8 @@ static int get_location(struct scsi_tape *STp, unsigned int *block, int *partiti
(STp->buffer)->b_data[1] == 0) /* BOP of partition 0 */
STp->ps[0].drv_block = STp->ps[0].drv_file = 0;
}
- DEBC(printk(ST_DEB_MSG "%s: Got tape pos. blk %d part %d.\n", name,
- *block, *partition));
+ DEBC_printk(STp, "Got tape pos. blk %d part %d.\n",
+ *block, *partition);
}
st_release_request(SRpnt);
SRpnt = NULL;
@@ -3083,15 +3103,14 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
int timeout;
unsigned char scmd[MAX_COMMAND_SIZE];
struct st_request *SRpnt;
- DEB( char *name = tape_name(STp); )
if (STp->ready != ST_READY)
return (-EIO);
timeout = STp->long_timeout;
STps = &(STp->ps[STp->partition]);
- DEBC(printk(ST_DEB_MSG "%s: Setting block to %d and partition to %d.\n",
- name, block, partition));
+ DEBC_printk(STp, "Setting block to %d and partition to %d.\n",
+ block, partition);
DEB(if (partition < 0)
return (-EIO); )
@@ -3105,9 +3124,9 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
else {
STps->last_block_valid = 1;
STps->last_block_visited = blk;
- DEBC(printk(ST_DEB_MSG
- "%s: Visited block %d for partition %d saved.\n",
- name, blk, STp->partition));
+ DEBC_printk(STp, "Visited block %d for "
+ "partition %d saved.\n",
+ blk, STp->partition);
}
}
@@ -3129,9 +3148,9 @@ static int set_location(struct scsi_tape *STp, unsigned int block, int partition
if (STp->partition != partition) {
scmd[1] |= 2;
scmd[8] = partition;
- DEBC(printk(ST_DEB_MSG
- "%s: Trying to change partition from %d to %d\n",
- name, STp->partition, partition));
+ DEBC_printk(STp, "Trying to change partition "
+ "from %d to %d\n", STp->partition,
+ partition);
}
}
if (STp->immediate) {
@@ -3222,7 +3241,6 @@ static int switch_partition(struct scsi_tape *STp)
static int nbr_partitions(struct scsi_tape *STp)
{
int result;
- DEB( char *name = tape_name(STp); )
if (STp->ready != ST_READY)
return (-EIO);
@@ -3230,13 +3248,12 @@ static int nbr_partitions(struct scsi_tape *STp)
result = read_mode_page(STp, PART_PAGE, 1);
if (result) {
- DEBC(printk(ST_DEB_MSG "%s: Can't read medium partition page.\n",
- name));
+ DEBC_printk(STp, "Can't read medium partition page.\n");
result = (-EIO);
} else {
result = (STp->buffer)->b_data[MODE_HEADER_LENGTH +
PP_OFF_NBR_ADD_PARTS] + 1;
- DEBC(printk(ST_DEB_MSG "%s: Number of partitions %d.\n", name, result));
+ DEBC_printk(STp, "Number of partitions %d.\n", result);
}
return result;
@@ -3264,21 +3281,20 @@ static int nbr_partitions(struct scsi_tape *STp)
*/
static int partition_tape(struct scsi_tape *STp, int size)
{
- char *name = tape_name(STp);
int result;
int pgo, psd_cnt, psdo;
unsigned char *bp;
result = read_mode_page(STp, PART_PAGE, 0);
if (result) {
- DEBC(printk(ST_DEB_MSG "%s: Can't read partition mode page.\n", name));
+ DEBC_printk(STp, "Can't read partition mode page.\n");
return result;
}
/* The mode page is in the buffer. Let's modify it and write it. */
bp = (STp->buffer)->b_data;
pgo = MODE_HEADER_LENGTH + bp[MH_OFF_BDESCS_LENGTH];
- DEBC(printk(ST_DEB_MSG "%s: Partition page length is %d bytes.\n",
- name, bp[pgo + MP_OFF_PAGE_LENGTH] + 2));
+ DEBC_printk(STp, "Partition page length is %d bytes.\n",
+ bp[pgo + MP_OFF_PAGE_LENGTH] + 2);
psd_cnt = (bp[pgo + MP_OFF_PAGE_LENGTH] + 2 - PART_PAGE_FIXED_LENGTH) / 2;
psdo = pgo + PART_PAGE_FIXED_LENGTH;
@@ -3288,25 +3304,23 @@ static int partition_tape(struct scsi_tape *STp, int size)
}
memset(bp + psdo, 0, bp[pgo + PP_OFF_NBR_ADD_PARTS] * 2);
- DEBC(printk("%s: psd_cnt %d, max.parts %d, nbr_parts %d\n", name,
+ DEBC_printk(STp, "psd_cnt %d, max.parts %d, nbr_parts %d\n",
psd_cnt, bp[pgo + PP_OFF_MAX_ADD_PARTS],
- bp[pgo + PP_OFF_NBR_ADD_PARTS]));
+ bp[pgo + PP_OFF_NBR_ADD_PARTS]);
if (size <= 0) {
bp[pgo + PP_OFF_NBR_ADD_PARTS] = 0;
if (psd_cnt <= bp[pgo + PP_OFF_MAX_ADD_PARTS])
bp[pgo + MP_OFF_PAGE_LENGTH] = 6;
- DEBC(printk(ST_DEB_MSG "%s: Formatting tape with one partition.\n",
- name));
+ DEBC_printk(STp, "Formatting tape with one partition.\n");
} else {
bp[psdo] = (size >> 8) & 0xff;
bp[psdo + 1] = size & 0xff;
bp[pgo + 3] = 1;
if (bp[pgo + MP_OFF_PAGE_LENGTH] < 8)
bp[pgo + MP_OFF_PAGE_LENGTH] = 8;
- DEBC(printk(ST_DEB_MSG
- "%s: Formatting tape with two partitions (1 = %d MB).\n",
- name, size));
+ DEBC_printk(STp, "Formatting tape with two partitions "
+ "(1 = %d MB).\n", size);
}
bp[pgo + PP_OFF_PART_UNITS] = 0;
bp[pgo + PP_OFF_RESERVED] = 0;
@@ -3314,7 +3328,7 @@ static int partition_tape(struct scsi_tape *STp, int size)
result = write_mode_page(STp, PART_PAGE, 1);
if (result) {
- printk(KERN_INFO "%s: Partitioning of tape failed.\n", name);
+ st_printk(KERN_INFO, STp, "Partitioning of tape failed.\n");
result = (-EIO);
}
@@ -3332,15 +3346,14 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
struct scsi_tape *STp = file->private_data;
struct st_modedef *STm;
struct st_partstat *STps;
- char *name = tape_name(STp);
void __user *p = (void __user *)arg;
if (mutex_lock_interruptible(&STp->lock))
return -ERESTARTSYS;
- DEB(
+ DEB(
if (debugging && !STp->in_use) {
- printk(ST_DEB_MSG "%s: Incorrect device.\n", name);
+ st_printk(ST_DEB_MSG, STp, "Incorrect device.\n");
retval = (-EIO);
goto out;
} ) /* end DEB */
@@ -3378,8 +3391,8 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
}
if (mtc.mt_op == MTSETDRVBUFFER && !capable(CAP_SYS_ADMIN)) {
- printk(KERN_WARNING
- "%s: MTSETDRVBUFFER only allowed for root.\n", name);
+ st_printk(KERN_WARNING, STp,
+ "MTSETDRVBUFFER only allowed for root.\n");
retval = (-EPERM);
goto out;
}
@@ -4087,7 +4100,8 @@ static int st_probe(struct device *dev)
return -ENODEV;
if ((stp = st_incompatible(SDp))) {
sdev_printk(KERN_INFO, SDp, "Found incompatible tape\n");
- printk(KERN_INFO "st: The suggested driver is %s.\n", stp);
+ sdev_printk(KERN_INFO, SDp,
+ "st: The suggested driver is %s.\n", stp);
return -ENODEV;
}
@@ -4096,20 +4110,23 @@ static int st_probe(struct device *dev)
i = st_max_sg_segs;
buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
if (buffer == NULL) {
- printk(KERN_ERR
- "st: Can't allocate new tape buffer. Device not attached.\n");
+ sdev_printk(KERN_ERR, SDp,
+ "st: Can't allocate new tape buffer. "
+ "Device not attached.\n");
goto out;
}
disk = alloc_disk(1);
if (!disk) {
- printk(KERN_ERR "st: out of memory. Device not attached.\n");
+ sdev_printk(KERN_ERR, SDp,
+ "st: out of memory. Device not attached.\n");
goto out_buffer_free;
}
tpnt = kzalloc(sizeof(struct scsi_tape), GFP_ATOMIC);
if (tpnt == NULL) {
- printk(KERN_ERR "st: Can't allocate device descriptor.\n");
+ sdev_printk(KERN_ERR, SDp,
+ "st: Can't allocate device descriptor.\n");
goto out_put_disk;
}
kref_init(&tpnt->kref);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 9969fa1ef7c4..fecac5d03fdd 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -33,6 +33,7 @@
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/mempool.h>
+#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -326,21 +327,23 @@ MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
*/
static int storvsc_timeout = 180;
+static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
+
#define STORVSC_MAX_IO_REQUESTS 200
static void storvsc_on_channel_callback(void *context);
-/*
- * In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
- * reality, the path/target is not used (ie always set to 0) so our
- * scsi host adapter essentially has 1 bus with 1 target that contains
- * up to 256 luns.
- */
-#define STORVSC_MAX_LUNS_PER_TARGET 64
-#define STORVSC_MAX_TARGETS 1
-#define STORVSC_MAX_CHANNELS 1
+#define STORVSC_MAX_LUNS_PER_TARGET 255
+#define STORVSC_MAX_TARGETS 2
+#define STORVSC_MAX_CHANNELS 8
+#define STORVSC_FC_MAX_LUNS_PER_TARGET 255
+#define STORVSC_FC_MAX_TARGETS 128
+#define STORVSC_FC_MAX_CHANNELS 8
+#define STORVSC_IDE_MAX_LUNS_PER_TARGET 64
+#define STORVSC_IDE_MAX_TARGETS 1
+#define STORVSC_IDE_MAX_CHANNELS 1
struct storvsc_cmd_request {
struct list_head entry;
@@ -1017,6 +1020,13 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case ATA_12:
set_host_byte(scmnd, DID_PASSTHROUGH);
break;
+ /*
+ * On some Windows hosts the TEST_UNIT_READY command can return
+ * SRB_STATUS_ERROR; let the upper-level code deal with it
+ * based on the sense information.
+ */
+ case TEST_UNIT_READY:
+ break;
default:
set_host_byte(scmnd, DID_TARGET_FAILURE);
}
@@ -1441,6 +1451,14 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
sdevice->no_write_same = 1;
+ /*
+ * Add blist flags to permit the reading of the VPD pages even when
+ * the target may claim SPC-2 compliance. MSFT targets currently
+ * claim SPC-2 compliance while they implement post SPC-2 features.
+ * With this patch we can correctly handle WRITE_SAME_16 issues.
+ */
+ sdevice->sdev_bflags |= msft_blist_flags;
+
return 0;
}
@@ -1518,6 +1536,16 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
return SUCCESS;
}
+/*
+ * The host guarantees to respond to each command, although I/O latencies might
+ * be unbounded on Azure. Reset the timer unconditionally to give the host a
+ * chance to perform EH.
+ */
+static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd)
+{
+ return BLK_EH_RESET_TIMER;
+}
+
static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
{
bool allowed = true;
@@ -1553,9 +1581,19 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct vmscsi_request *vm_srb;
struct stor_mem_pools *memp = scmnd->device->hostdata;
- if (!storvsc_scsi_cmd_ok(scmnd)) {
- scmnd->scsi_done(scmnd);
- return 0;
+ if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
+ /*
+ * On legacy hosts filter unimplemented commands.
+ * Future hosts are expected to correctly handle
+ * unsupported commands. Furthermore, it is
+ * possible that some of the currently
+ * unsupported commands may be supported in
+ * future versions of the host.
+ */
+ if (!storvsc_scsi_cmd_ok(scmnd)) {
+ scmnd->scsi_done(scmnd);
+ return 0;
+ }
}
request_size = sizeof(struct storvsc_cmd_request);
@@ -1580,26 +1618,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
vm_srb = &cmd_request->vstor_packet.vm_srb;
vm_srb->win8_extension.time_out_value = 60;
+ vm_srb->win8_extension.srb_flags |=
+ (SRB_FLAGS_QUEUE_ACTION_ENABLE |
+ SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
/* Build the SRB */
switch (scmnd->sc_data_direction) {
case DMA_TO_DEVICE:
vm_srb->data_in = WRITE_TYPE;
vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_OUT;
- vm_srb->win8_extension.srb_flags |=
- (SRB_FLAGS_QUEUE_ACTION_ENABLE |
- SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
break;
case DMA_FROM_DEVICE:
vm_srb->data_in = READ_TYPE;
vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
- vm_srb->win8_extension.srb_flags |=
- (SRB_FLAGS_QUEUE_ACTION_ENABLE |
- SRB_FLAGS_DISABLE_SYNCH_TRANSFER);
break;
default:
vm_srb->data_in = UNKNOWN_TYPE;
- vm_srb->win8_extension.srb_flags = 0;
+ vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
+ SRB_FLAGS_DATA_OUT);
break;
}
@@ -1687,11 +1723,11 @@ static struct scsi_host_template scsi_driver = {
.bios_param = storvsc_get_chs,
.queuecommand = storvsc_queuecommand,
.eh_host_reset_handler = storvsc_host_reset_handler,
+ .eh_timed_out = storvsc_eh_timed_out,
.slave_alloc = storvsc_device_alloc,
.slave_destroy = storvsc_device_destroy,
.slave_configure = storvsc_device_configure,
- .cmd_per_lun = 1,
- /* 64 max_queue * 1 target */
+ .cmd_per_lun = 255,
.can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
.this_id = -1,
/* no use setting to 0 since ll_blk_rw reset it to 1 */
@@ -1743,19 +1779,25 @@ static int storvsc_probe(struct hv_device *device,
* set state to properly communicate with the host.
*/
- if (vmbus_proto_version == VERSION_WIN8) {
- sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
- vmscsi_size_delta = 0;
- vmstor_current_major = VMSTOR_WIN8_MAJOR;
- vmstor_current_minor = VMSTOR_WIN8_MINOR;
- } else {
+ switch (vmbus_proto_version) {
+ case VERSION_WS2008:
+ case VERSION_WIN7:
sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
vmstor_current_major = VMSTOR_WIN7_MAJOR;
vmstor_current_minor = VMSTOR_WIN7_MINOR;
+ break;
+ default:
+ sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
+ vmscsi_size_delta = 0;
+ vmstor_current_major = VMSTOR_WIN8_MAJOR;
+ vmstor_current_minor = VMSTOR_WIN8_MINOR;
+ break;
}
-
+ if (dev_id->driver_data == SFC_GUID)
+ scsi_driver.can_queue = (STORVSC_MAX_IO_REQUESTS *
+ STORVSC_FC_MAX_TARGETS);
host = scsi_host_alloc(&scsi_driver,
sizeof(struct hv_host_device));
if (!host)
@@ -1789,12 +1831,25 @@ static int storvsc_probe(struct hv_device *device,
host_dev->path = stor_device->path_id;
host_dev->target = stor_device->target_id;
- /* max # of devices per target */
- host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
- /* max # of targets per channel */
- host->max_id = STORVSC_MAX_TARGETS;
- /* max # of channels */
- host->max_channel = STORVSC_MAX_CHANNELS - 1;
+ switch (dev_id->driver_data) {
+ case SFC_GUID:
+ host->max_lun = STORVSC_FC_MAX_LUNS_PER_TARGET;
+ host->max_id = STORVSC_FC_MAX_TARGETS;
+ host->max_channel = STORVSC_FC_MAX_CHANNELS - 1;
+ break;
+
+ case SCSI_GUID:
+ host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
+ host->max_id = STORVSC_MAX_TARGETS;
+ host->max_channel = STORVSC_MAX_CHANNELS - 1;
+ break;
+
+ default:
+ host->max_lun = STORVSC_IDE_MAX_LUNS_PER_TARGET;
+ host->max_id = STORVSC_IDE_MAX_TARGETS;
+ host->max_channel = STORVSC_IDE_MAX_CHANNELS - 1;
+ break;
+ }
/* max cmd length */
host->max_cmd_len = STORVSC_MAX_CMD_LEN;
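The .eh_timed_out hook registered in the storvsc host template above must return a value from enum blk_eh_timer_return. storvsc always answers BLK_EH_RESET_TIMER; as a rough, hypothetical illustration of the other options a driver has (the helper name below is made up):

	/* Illustrative sketch only, not storvsc code. */
	static enum blk_eh_timer_return example_eh_timed_out(struct scsi_cmnd *scmnd)
	{
		if (command_still_owned_by_hw(scmnd))	/* hypothetical helper */
			return BLK_EH_RESET_TIMER;	/* restart the block-layer timer */
		return BLK_EH_NOT_HANDLED;		/* fall through to normal SCSI EH */
	}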
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 88220794cc98..1a2367a1b1f2 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -355,17 +355,18 @@ static void __init init_tags( void )
static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
{
+ u8 lun = cmd->device->lun;
SETUP_HOSTDATA(cmd->device->host);
- if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))
+ if (hostdata->busy[cmd->device->id] & (1 << lun))
return( 1 );
if (!should_be_tagged ||
!setup_use_tagged_queuing || !cmd->device->tagged_supported)
return( 0 );
- if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
- TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) {
+ if (TagAlloc[cmd->device->id][lun].nr_allocated >=
+ TagAlloc[cmd->device->id][lun].queue_size ) {
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
- H_NO(cmd), cmd->device->id, cmd->device->lun );
+ H_NO(cmd), cmd->device->id, lun );
return( 1 );
}
return( 0 );
@@ -379,6 +380,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
{
+ u8 lun = cmd->device->lun;
SETUP_HOSTDATA(cmd->device->host);
/* If we or the target don't support tagged queuing, allocate the LUN for
@@ -387,19 +389,19 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
if (!should_be_tagged ||
!setup_use_tagged_queuing || !cmd->device->tagged_supported) {
cmd->tag = TAG_NONE;
- hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] |= (1 << lun);
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
- "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun );
+ "command\n", H_NO(cmd), cmd->device->id, lun );
}
else {
- TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
+ TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun];
cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS );
set_bit( cmd->tag, &ta->allocated );
ta->nr_allocated++;
dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
"(now %d tags in use)\n",
- H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun,
+ H_NO(cmd), cmd->tag, cmd->device->id, lun,
ta->nr_allocated );
}
}
@@ -411,23 +413,24 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
static void cmd_free_tag(struct scsi_cmnd *cmd)
{
+ u8 lun = cmd->device->lun;
SETUP_HOSTDATA(cmd->device->host);
if (cmd->tag == TAG_NONE) {
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << lun);
dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
- H_NO(cmd), cmd->device->id, cmd->device->lun );
+ H_NO(cmd), cmd->device->id, lun );
}
else if (cmd->tag >= MAX_TAGS) {
printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
H_NO(cmd), cmd->tag );
}
else {
- TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
+ TAG_ALLOC *ta = &TagAlloc[cmd->device->id][lun];
clear_bit( cmd->tag, &ta->allocated );
ta->nr_allocated--;
dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
- H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );
+ H_NO(cmd), cmd->tag, cmd->device->id, lun );
}
}
@@ -659,7 +662,7 @@ static void lprint_Scsi_Cmnd(Scsi_Cmnd *cmd)
{
int i, s;
unsigned char *command;
- printk("scsi%d: destination target %d, lun %d\n",
+ printk("scsi%d: destination target %d, lun %llu\n",
H_NO(cmd), cmd->device->id, cmd->device->lun);
printk(KERN_CONT " command = ");
command = cmd->cmnd;
@@ -705,7 +708,7 @@ static void show_Scsi_Cmnd(Scsi_Cmnd *cmd, struct seq_file *m)
{
int i, s;
unsigned char *command;
- seq_printf(m, "scsi%d: destination target %d, lun %d\n",
+ seq_printf(m, "scsi%d: destination target %d, lun %llu\n",
H_NO(cmd), cmd->device->id, cmd->device->lun);
seq_printf(m, " command = ");
command = cmd->cmnd;
@@ -1007,7 +1010,7 @@ static void NCR5380_main (struct work_struct *bl)
prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {
if (prev != tmp)
- dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
+ dprintk(NDEBUG_LISTS, "MAIN tmp=%p target=%d busy=%d lun=%llu\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
/* When we find one, remove it from the issue queue. */
/* ++guenther: possible race with Falcon locking */
if (
@@ -1038,7 +1041,7 @@ static void NCR5380_main (struct work_struct *bl)
* issue queue so we can keep trying.
*/
dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
- "lun %d removed from issue_queue\n",
+ "lun %llu removed from issue_queue\n",
HOSTNO, tmp->device->id, tmp->device->lun);
/*
* REQUEST SENSE commands are issued without tagged
@@ -2020,7 +2023,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
* accesses to this device will use the
* polled-IO. */
printk(KERN_NOTICE "scsi%d: switching target %d "
- "lun %d to slow handshake\n", HOSTNO,
+ "lun %llu to slow handshake\n", HOSTNO,
cmd->device->id, cmd->device->lun);
cmd->device->borken = 1;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
@@ -2078,7 +2081,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked command "
"complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
/* Enable reselect interrupts */
@@ -2090,7 +2093,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
*/
if (!cmd->next_link) {
- printk(KERN_NOTICE "scsi%d: target %d lun %d "
+ printk(KERN_NOTICE "scsi%d: target %d lun %llu "
"linked command complete, no next_link\n",
HOSTNO, cmd->device->id, cmd->device->lun);
sink = 1;
@@ -2103,7 +2106,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
* and don't free it! */
cmd->next_link->tag = cmd->tag;
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
- dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
+ dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %llu linked request "
"done, calling scsi_done().\n",
HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef NCR5380_STATS
@@ -2118,7 +2121,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
hostdata->connected = NULL;
- dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %llu "
"completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
#ifdef SUPPORT_TAGS
cmd_free_tag( cmd );
@@ -2132,7 +2135,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
/* ++Andreas: the mid level code knows about
QUEUE_FULL now. */
TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
- dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu returned "
"QUEUE_FULL after %d commands\n",
HOSTNO, cmd->device->id, cmd->device->lun,
ta->nr_allocated);
@@ -2228,7 +2231,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
cmd->device->tagged_supported = 0;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
cmd->tag = TAG_NONE;
- dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
+ dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %llu rejected "
"QUEUE_TAG message; tagged queuing "
"disabled\n",
HOSTNO, cmd->device->id, cmd->device->lun);
@@ -2245,7 +2248,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->disconnected_queue = cmd;
local_irq_restore(flags);
- dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
+ dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %llu was "
"moved from connected to the "
"disconnected_queue\n", HOSTNO,
cmd->device->id, cmd->device->lun);
@@ -2349,12 +2352,12 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
printk("\n");
} else if (tmp != EXTENDED_MESSAGE)
printk(KERN_DEBUG "scsi%d: rejecting unknown "
- "message %02x from target %d, lun %d\n",
+ "message %02x from target %d, lun %llu\n",
HOSTNO, tmp, cmd->device->id, cmd->device->lun);
else
printk(KERN_DEBUG "scsi%d: rejecting unknown "
"extended message "
- "code %02x, length %d from target %d, lun %d\n",
+ "code %02x, length %d from target %d, lun %llu\n",
HOSTNO, extended_msg[1], extended_msg[0],
cmd->device->id, cmd->device->lun);
@@ -2576,7 +2579,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
#endif
hostdata->connected = tmp;
- dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+ dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %llu, tag = %d\n",
HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
}
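The sun3_NCR5380 hunks above, and the sym53c8xx, tmscsim, u14-34f and wd33c93 hunks below, all apply the same 64-bit LUN conversion pattern: print scsi_device->lun with %llu, and narrow it explicitly wherever it feeds a small per-target bitmap or an 8-bit hardware field. A compressed sketch of the idea (illustrative; hostdata->busy is assumed to be the usual per-target LUN bitmap):

	u64 lun = sdev->lun;	/* scsi_device.lun is now 64 bits wide */
	printk("target %d, lun %llu\n", sdev->id, lun);
	hostdata->busy[sdev->id] |= 1 << (lun & 0xff);	/* legacy bitmaps index by a narrow LUN */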
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 6d3ee1ab6362..e59e6f96b725 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -851,7 +851,7 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
* so let's try to stop all on-going I/O.
*/
starget_printk(KERN_WARNING, tp->starget,
- "Removing busy LCB (%d)\n", sdev->lun);
+ "Removing busy LCB (%d)\n", (u8)sdev->lun);
sym_reset_scsi_bus(np, 1);
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 5a80cbac3f92..a141b1758033 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -581,7 +581,7 @@ struct sym_pmc {
#define sym_lp(tp, lun) (!lun) ? (tp)->lun0p : NULL
#else
#define sym_lp(tp, lun) \
- (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[(lun)] : NULL
+ (!lun) ? (tp)->lun0p : (tp)->lunmp ? (tp)->lunmp[((u8)lun)] : NULL
#endif
/*
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index b006cf789ba1..764575726c85 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -621,7 +621,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr
{
dc390_freetag (pDCB, pSRB);
DEBUG0(printk ("DC390: Interrupt during Start SCSI (target %02i-%02i)\n",
- scmd->device->id, scmd->device->lun));
+ scmd->device->id, (u8)scmd->device->lun));
pSRB->SRBState = SRB_READY;
//DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD);
pACB->SelLost++;
@@ -1726,7 +1726,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
} else {
SET_RES_DRV(pcmd->result, DRIVER_SENSE);
//pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
- DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
+ DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun));
pSRB->TotalXferredLen = 0;
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
}
@@ -1746,7 +1746,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
else if (status == SAM_STAT_TASK_SET_FULL)
{
scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
- DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
+ DEBUG0 (printk ("DC390: RETRY (%02x), target %02i-%02i\n", pcmd->cmnd[0], pcmd->device->id, (u8)pcmd->device->lun));
pSRB->TotalXferredLen = 0;
SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
}
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 5a03bb3bcfef..4e76fe863fc4 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1285,14 +1285,14 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct
cpp->cpp_index = i;
SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index;
- if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%d.\n",
+ if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%llu.\n",
BN(j), i, SCpnt->device->channel, SCpnt->device->id,
- SCpnt->device->lun);
+ (u8)SCpnt->device->lun);
cpp->opcode = OP_SCSI;
cpp->channel = SCpnt->device->channel;
cpp->target = SCpnt->device->id;
- cpp->lun = SCpnt->device->lun;
+ cpp->lun = (u8)SCpnt->device->lun;
cpp->SCpnt = SCpnt;
cpp->cdb_len = SCpnt->cmd_len;
memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);
@@ -1663,10 +1663,10 @@ static int reorder(unsigned int j, unsigned long cursec,
if (link_statistics && (overlap || !(flushcount % link_statistics)))
for (n = 0; n < n_ready; n++) {
k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
- printk("%s %d.%d:%d mb %d fc %d nr %d sec %ld ns %u"\
+ printk("%s %d.%d:%llu mb %d fc %d nr %d sec %ld ns %u"\
" cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
(ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
- SCpnt->lun, k, flushcount, n_ready,
+ (u8)SCpnt->lun, k, flushcount, n_ready,
blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
YESNO(overlap), cpp->xdir);
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index f42d1cee652a..fafcf5e354c6 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -41,7 +41,8 @@
#define MAX_CDB_SIZE 16
#define GENERAL_UPIU_REQUEST_SIZE 32
-#define QUERY_DESC_MAX_SIZE 256
+#define QUERY_DESC_MAX_SIZE 255
+#define QUERY_DESC_MIN_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
@@ -117,6 +118,41 @@ enum attr_idn {
QUERY_ATTR_IDN_EE_STATUS = 0x0E,
};
+/* Descriptor idn for Query requests */
+enum desc_idn {
+ QUERY_DESC_IDN_DEVICE = 0x0,
+ QUERY_DESC_IDN_CONFIGURAION = 0x1,
+ QUERY_DESC_IDN_UNIT = 0x2,
+ QUERY_DESC_IDN_RFU_0 = 0x3,
+ QUERY_DESC_IDN_INTERCONNECT = 0x4,
+ QUERY_DESC_IDN_STRING = 0x5,
+ QUERY_DESC_IDN_RFU_1 = 0x6,
+ QUERY_DESC_IDN_GEOMETRY = 0x7,
+ QUERY_DESC_IDN_POWER = 0x8,
+ QUERY_DESC_IDN_RFU_2 = 0x9,
+};
+
+#define UNIT_DESC_MAX_SIZE 0x22
+/* Unit descriptor parameters offsets in bytes*/
+enum unit_desc_param {
+ UNIT_DESC_PARAM_LEN = 0x0,
+ UNIT_DESC_PARAM_TYPE = 0x1,
+ UNIT_DESC_PARAM_UNIT_INDEX = 0x2,
+ UNIT_DESC_PARAM_LU_ENABLE = 0x3,
+ UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4,
+ UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5,
+ UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6,
+ UNIT_DESC_PARAM_MEM_TYPE = 0x8,
+ UNIT_DESC_PARAM_DATA_RELIABILITY = 0x9,
+ UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA,
+ UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB,
+ UNIT_DESC_PARAM_ERASE_BLK_SIZE = 0x13,
+ UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17,
+ UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18,
+ UNIT_DESC_PARAM_CTX_CAPABILITIES = 0x20,
+ UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
+};
+
/* Exception event mask values */
enum {
MASK_EE_STATUS = 0xFFFF,
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 8b9531204c2b..c007a7a69c28 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -135,26 +135,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
}
/**
- * ufshcd_set_dma_mask - Set dma mask based on the controller
- * addressing capability
- * @pdev: PCI device structure
- *
- * Returns 0 for success, non-zero for failure
- */
-static int ufshcd_set_dma_mask(struct pci_dev *pdev)
-{
- int err;
-
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- return 0;
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- return err;
-}
-
-/**
* ufshcd_pci_probe - probe routine of the driver
* @pdev: pointer to PCI device handle
* @id: PCI device id
@@ -184,12 +164,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mmio_base = pcim_iomap_table(pdev)[0];
- err = ufshcd_set_dma_mask(pdev);
- if (err) {
- dev_err(&pdev->dev, "set dma mask failed\n");
- return err;
- }
-
err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0c2877251251..ba27215b8034 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -110,6 +110,8 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
+static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
+ struct scsi_device *sdev);
/*
* ufshcd_wait_for_register - wait for register value to change
@@ -446,30 +448,35 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
* @lrb - pointer to local reference block
*/
static
-void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
- /* Get the UPIU response */
- query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
- UPIU_RSP_CODE_OFFSET;
-
memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
-
/* Get the descriptor */
if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
- u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr +
+ u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
GENERAL_UPIU_REQUEST_SIZE;
- u16 len;
+ u16 resp_len;
+ u16 buf_len;
/* data segment length */
- len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+ resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
MASK_QUERY_DATA_SEG_LEN;
-
- memcpy(hba->dev_cmd.query.descriptor, descp,
- min_t(u16, len, QUERY_DESC_MAX_SIZE));
+ buf_len = be16_to_cpu(
+ hba->dev_cmd.query.request.upiu_req.length);
+ if (likely(buf_len >= resp_len)) {
+ memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
+ } else {
+ dev_warn(hba->dev,
+ "%s: Response size is bigger than buffer",
+ __func__);
+ return -EINVAL;
+ }
}
+
+ return 0;
}
/**
@@ -797,11 +804,9 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
QUERY_OSF_SIZE);
/* Copy the Descriptor */
- if ((len > 0) && (query->request.upiu_req.opcode ==
- UPIU_QUERY_OPCODE_WRITE_DESC)) {
- memcpy(descp, query->descriptor,
- min_t(u16, len, QUERY_DESC_MAX_SIZE));
- }
+ if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
+ memcpy(descp, query->descriptor, len);
+
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
@@ -980,6 +985,17 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
return err;
}
+static int
+ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ /* Get the UPIU response */
+ query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
+ UPIU_RSP_CODE_OFFSET;
+ return query_res->response;
+}
+
/**
* ufshcd_dev_cmd_completion() - handles device management command responses
* @hba: per adapter instance
@@ -1002,7 +1018,9 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
}
break;
case UPIU_TRANSACTION_QUERY_RSP:
- ufshcd_copy_query_response(hba, lrbp);
+ err = ufshcd_check_query_response(hba, lrbp);
+ if (!err)
+ err = ufshcd_copy_query_response(hba, lrbp);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -1134,6 +1152,30 @@ out_put_tag:
}
/**
+ * ufshcd_init_query() - init the query response and request parameters
+ * @hba: per-adapter instance
+ * @request: address of the request pointer to be initialized
+ * @response: address of the response pointer to be initialized
+ * @opcode: operation to perform
+ * @idn: flag idn to access
+ * @index: LU number to access
+ * @selector: query/flag/descriptor further identification
+ */
+static inline void ufshcd_init_query(struct ufs_hba *hba,
+ struct ufs_query_req **request, struct ufs_query_res **response,
+ enum query_opcode opcode, u8 idn, u8 index, u8 selector)
+{
+ *request = &hba->dev_cmd.query.request;
+ *response = &hba->dev_cmd.query.response;
+ memset(*request, 0, sizeof(struct ufs_query_req));
+ memset(*response, 0, sizeof(struct ufs_query_res));
+ (*request)->upiu_req.opcode = opcode;
+ (*request)->upiu_req.idn = idn;
+ (*request)->upiu_req.index = index;
+ (*request)->upiu_req.selector = selector;
+}
+
+/**
* ufshcd_query_flag() - API function for sending flag query requests
* hba: per-adapter instance
* query_opcode: flag query to perform
@@ -1145,17 +1187,15 @@ out_put_tag:
static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res)
{
- struct ufs_query_req *request;
- struct ufs_query_res *response;
- int err;
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err, index = 0, selector = 0;
BUG_ON(!hba);
mutex_lock(&hba->dev_cmd.lock);
- request = &hba->dev_cmd.query.request;
- response = &hba->dev_cmd.query.response;
- memset(request, 0, sizeof(struct ufs_query_req));
- memset(response, 0, sizeof(struct ufs_query_res));
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
switch (opcode) {
case UPIU_QUERY_OPCODE_SET_FLAG:
@@ -1180,12 +1220,8 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
err = -EINVAL;
goto out_unlock;
}
- request->upiu_req.opcode = opcode;
- request->upiu_req.idn = idn;
- /* Send query request */
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
- QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
dev_err(hba->dev,
@@ -1217,8 +1253,8 @@ out_unlock:
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
- struct ufs_query_req *request;
- struct ufs_query_res *response;
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
int err;
BUG_ON(!hba);
@@ -1231,10 +1267,8 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
}
mutex_lock(&hba->dev_cmd.lock);
- request = &hba->dev_cmd.query.request;
- response = &hba->dev_cmd.query.response;
- memset(request, 0, sizeof(struct ufs_query_req));
- memset(response, 0, sizeof(struct ufs_query_res));
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
switch (opcode) {
case UPIU_QUERY_OPCODE_WRITE_ATTR:
@@ -1251,14 +1285,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- request->upiu_req.opcode = opcode;
- request->upiu_req.idn = idn;
- request->upiu_req.index = index;
- request->upiu_req.selector = selector;
-
- /* Send query request */
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
- QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
@@ -1275,6 +1302,82 @@ out:
}
/**
+ * ufshcd_query_descriptor - API function for sending descriptor requests
+ * hba: per-adapter instance
+ * opcode: attribute opcode
+ * idn: attribute idn to access
+ * index: index field
+ * selector: selector field
+ * desc_buf: the buffer that contains the descriptor
+ * buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+static int ufshcd_query_descriptor(struct ufs_hba *hba,
+ enum query_opcode opcode, enum desc_idn idn, u8 index,
+ u8 selector, u8 *desc_buf, int *buf_len)
+{
+ struct ufs_query_req *request = NULL;
+ struct ufs_query_res *response = NULL;
+ int err;
+
+ BUG_ON(!hba);
+
+ if (!desc_buf) {
+ dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
+ __func__, *buf_len);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ ufshcd_init_query(hba, &request, &response, opcode, idn, index,
+ selector);
+ hba->dev_cmd.query.descriptor = desc_buf;
+ request->upiu_req.length = cpu_to_be16(*buf_len);
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query descriptor opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
+ __func__, opcode, idn, err);
+ goto out_unlock;
+ }
+
+ hba->dev_cmd.query.descriptor = NULL;
+ *buf_len = be16_to_cpu(response->upiu_res.length);
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ return err;
+}
+
+/**
* ufshcd_memory_alloc - allocate memory for host memory space data structures
* @hba: per adapter instance
*
@@ -1878,6 +1981,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
struct ufs_hba *hba;
+ int lun_qdepth;
hba = shost_priv(sdev->host);
sdev->tagged_supported = 1;
@@ -1889,14 +1993,68 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
/* allow SCSI layer to restart the device in case of errors */
sdev->allow_restart = 1;
- /*
- * Inform SCSI Midlayer that the LUN queue depth is same as the
- * controller queue depth. If a LUN queue depth is less than the
- * controller queue depth and if the LUN reports
- * SAM_STAT_TASK_SET_FULL, the LUN queue depth will be adjusted
- * with scsi_adjust_queue_depth.
- */
- scsi_activate_tcq(sdev, hba->nutrs);
+ /* REPORT SUPPORTED OPERATION CODES is not supported */
+ sdev->no_report_opcodes = 1;
+
+ lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev);
+ if (lun_qdepth <= 0)
+ /* eventually, we can figure out the real queue depth */
+ lun_qdepth = hba->nutrs;
+ else
+ lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+ dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+ __func__, lun_qdepth);
+ scsi_activate_tcq(sdev, lun_qdepth);
+
+ return 0;
+}
+
+/**
+ * ufshcd_change_queue_depth - change queue depth
+ * @sdev: pointer to SCSI device
+ * @depth: required depth to set
+ * @reason: reason for changing the depth
+ *
+ * Change queue depth according to the reason and make sure
+ * the max. limits are not crossed.
+ */
+static int ufshcd_change_queue_depth(struct scsi_device *sdev,
+ int depth, int reason)
+{
+ struct ufs_hba *hba = shost_priv(sdev->host);
+
+ if (depth > hba->nutrs)
+ depth = hba->nutrs;
+
+ switch (reason) {
+ case SCSI_QDEPTH_DEFAULT:
+ case SCSI_QDEPTH_RAMP_UP:
+ if (!sdev->tagged_supported)
+ depth = 1;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+ break;
+ case SCSI_QDEPTH_QFULL:
+ scsi_track_queue_full(sdev, depth);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return depth;
+}
+
+/**
+ * ufshcd_slave_configure - adjust SCSI device configurations
+ * @sdev: pointer to SCSI device
+ */
+static int ufshcd_slave_configure(struct scsi_device *sdev)
+{
+ struct request_queue *q = sdev->request_queue;
+
+ blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
+
return 0;
}
@@ -1953,42 +2111,6 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
}
/**
- * ufshcd_adjust_lun_qdepth - Update LUN queue depth if device responds with
- * SAM_STAT_TASK_SET_FULL SCSI command status.
- * @cmd: pointer to SCSI command
- */
-static void ufshcd_adjust_lun_qdepth(struct scsi_cmnd *cmd)
-{
- struct ufs_hba *hba;
- int i;
- int lun_qdepth = 0;
-
- hba = shost_priv(cmd->device->host);
-
- /*
- * LUN queue depth can be obtained by counting outstanding commands
- * on the LUN.
- */
- for (i = 0; i < hba->nutrs; i++) {
- if (test_bit(i, &hba->outstanding_reqs)) {
-
- /*
- * Check if the outstanding command belongs
- * to the LUN which reported SAM_STAT_TASK_SET_FULL.
- */
- if (cmd->device->lun == hba->lrb[i].lun)
- lun_qdepth++;
- }
- }
-
- /*
- * LUN queue depth will be total outstanding commands, except the
- * command for which the LUN reported SAM_STAT_TASK_SET_FULL.
- */
- scsi_adjust_queue_depth(cmd->device, MSG_SIMPLE_TAG, lun_qdepth - 1);
-}
-
-/**
* ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
* @lrb: pointer to local reference block of completed command
* @scsi_status: SCSI command status
@@ -2009,12 +2131,6 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
scsi_status;
break;
case SAM_STAT_TASK_SET_FULL:
- /*
- * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue
- * depth needs to be adjusted to the exact number of
- * outstanding commands the LUN can handle at any given time.
- */
- ufshcd_adjust_lun_qdepth(lrbp->cmd);
case SAM_STAT_BUSY:
case SAM_STAT_TASK_ABORTED:
ufshcd_copy_sense_data(lrbp);
@@ -2134,47 +2250,42 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
u32 tr_doorbell;
int result;
int index;
- bool int_aggr_reset = false;
+
+ /* Resetting interrupt aggregation counters first and reading the
+ * DOOR_BELL afterward allows us to handle all the completed requests.
+ * In order to prevent starvation of other interrupts, the DB is read once
+ * after reset. The down side of this solution is the possibility of
+ * false interrupt if device completes another request after resetting
+ * aggregation and before reading the DB.
+ */
+ ufshcd_reset_intr_aggr(hba);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
- for (index = 0; index < hba->nutrs; index++) {
- if (test_bit(index, &completed_reqs)) {
- lrbp = &hba->lrb[index];
- cmd = lrbp->cmd;
- /*
- * Don't skip resetting interrupt aggregation counters
- * if a regular command is present.
- */
- int_aggr_reset |= !lrbp->intr_cmd;
-
- if (cmd) {
- result = ufshcd_transfer_rsp_status(hba, lrbp);
- scsi_dma_unmap(cmd);
- cmd->result = result;
- /* Mark completed command as NULL in LRB */
- lrbp->cmd = NULL;
- clear_bit_unlock(index, &hba->lrb_in_use);
- /* Do not touch lrbp after scsi done */
- cmd->scsi_done(cmd);
- } else if (lrbp->command_type ==
- UTP_CMD_TYPE_DEV_MANAGE) {
- if (hba->dev_cmd.complete)
- complete(hba->dev_cmd.complete);
- }
- } /* end of if */
- } /* end of for */
+ for_each_set_bit(index, &completed_reqs, hba->nutrs) {
+ lrbp = &hba->lrb[index];
+ cmd = lrbp->cmd;
+ if (cmd) {
+ result = ufshcd_transfer_rsp_status(hba, lrbp);
+ scsi_dma_unmap(cmd);
+ cmd->result = result;
+ /* Mark completed command as NULL in LRB */
+ lrbp->cmd = NULL;
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ /* Do not touch lrbp after scsi done */
+ cmd->scsi_done(cmd);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+ if (hba->dev_cmd.complete)
+ complete(hba->dev_cmd.complete);
+ }
+ }
/* clear corresponding bits of completed commands */
hba->outstanding_reqs ^= completed_reqs;
/* we might have free'd some tags above */
wake_up(&hba->dev_cmd.tag_wq);
-
- /* Reset interrupt aggregation counters */
- if (int_aggr_reset)
- ufshcd_reset_intr_aggr(hba);
}
/**
@@ -2779,6 +2890,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
int poll_cnt;
u8 resp = 0xF;
struct ufshcd_lrb *lrbp;
+ u32 reg;
host = cmd->device->host;
hba = shost_priv(host);
@@ -2788,6 +2900,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
if (!(test_bit(tag, &hba->outstanding_reqs)))
goto out;
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (!(reg & (1 << tag))) {
+ dev_err(hba->dev,
+ "%s: cmd was completed, but without a notifying intr, tag = %d",
+ __func__, tag);
+ }
+
lrbp = &hba->lrb[tag];
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
@@ -2796,8 +2915,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
/* cmd pending in the device */
break;
} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- u32 reg;
-
/*
* cmd not pending in the device, check if it is
* in transition.
@@ -2971,6 +3088,38 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
}
/**
+ * ufshcd_read_sdev_qdepth - read the lun command queue depth
+ * @hba: Pointer to adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ * Returns the LUN's queue depth on success, or a negative error code otherwise.
+ */
+static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
+ struct scsi_device *sdev)
+{
+ int ret;
+ int buff_len = UNIT_DESC_MAX_SIZE;
+ u8 desc_buf[UNIT_DESC_MAX_SIZE];
+
+ ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len);
+
+ if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) {
+ dev_err(hba->dev,
+ "%s:Failed reading unit descriptor. len = %d ret = %d"
+ , __func__, buff_len, ret);
+ if (!ret)
+ ret = -EINVAL;
+
+ goto out;
+ }
+
+ ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF;
+out:
+ return ret;
+}
+
+/**
* ufshcd_async_scan - asynchronous execution for link startup
* @data: data pointer to pass to this function
* @cookie: cookie data
@@ -3012,7 +3161,9 @@ static struct scsi_host_template ufshcd_driver_template = {
.proc_name = UFSHCD,
.queuecommand = ufshcd_queuecommand,
.slave_alloc = ufshcd_slave_alloc,
+ .slave_configure = ufshcd_slave_configure,
.slave_destroy = ufshcd_slave_destroy,
+ .change_queue_depth = ufshcd_change_queue_depth,
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
@@ -3110,6 +3261,22 @@ void ufshcd_remove(struct ufs_hba *hba)
EXPORT_SYMBOL_GPL(ufshcd_remove);
/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+{
+ if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
+ if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
+ return 0;
+ }
+ return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+}
+
+/**
* ufshcd_init - Driver initialization routine
* @dev: pointer to device handle
* @hba_handle: driver private handle
@@ -3160,6 +3327,12 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
/* Get Interrupt bit mask per version */
hba->intr_mask = ufshcd_get_intr_mask(hba);
+ err = ufshcd_set_dma_mask(hba);
+ if (err) {
+ dev_err(hba->dev, "set dma mask failed\n");
+ goto out_disable;
+ }
+
/* Allocate memory for host memory space */
err = ufshcd_memory_alloc(hba);
if (err) {
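ufshcd_query_descriptor() is the new generic entry point; the unit-descriptor read in ufshcd_read_sdev_qdepth() above is its only caller in this patch. A usage sketch (illustrative, modelled directly on that caller) for reading the unit descriptor of LUN 0:

	/* Illustrative usage, mirroring ufshcd_read_sdev_qdepth(). */
	u8 desc_buf[UNIT_DESC_MAX_SIZE];
	int buf_len = UNIT_DESC_MAX_SIZE;
	int err = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
					  QUERY_DESC_IDN_UNIT, 0 /* LUN */, 0,
					  desc_buf, &buf_len);
	if (!err)
		dev_dbg(hba->dev, "LU queue depth byte: %u\n",
			desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]);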
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9abc7e32b43d..e1b844bc9460 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -296,6 +296,11 @@ enum {
MASK_OCS = 0x0F,
};
+/* The maximum length of the data byte count field in the PRDT is 256KB */
+#define PRDT_DATA_BYTE_COUNT_MAX (256 * 1024)
+/* The granularity of the data byte count field in the PRDT is 32-bit */
+#define PRDT_DATA_BYTE_COUNT_PAD 4
+
/**
* struct ufshcd_sg_entry - UFSHCI PRD Entry
* @base_addr: Lower 32bit physical address DW-0
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 308256b5e4cb..eee1bc0b506e 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -27,6 +27,8 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include <linux/seqlock.h>
#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
@@ -75,18 +77,16 @@ struct virtio_scsi_vq {
* queue, and also lets the driver optimize the IRQ affinity for the virtqueues
* (each virtqueue's affinity is set to the CPU that "owns" the queue).
*
- * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq
- * could be done locklessly, but we do not do it yet.
+ * tgt_seq is held to serialize reading and writing req_vq.
*
* Decrements of reqs are never concurrent with writes of req_vq: before the
* decrement reqs will be != 0; after the decrement the virtqueue completion
* routine will not use the req_vq so it can be changed by a new request.
- * Thus they can happen outside the tgt_lock, provided of course we make reqs
+ * Thus they can happen outside the tgt_seq, provided of course we make reqs
* an atomic_t.
*/
struct virtio_scsi_target_state {
- /* This spinlock never held at the same time as vq_lock. */
- spinlock_t tgt_lock;
+ seqcount_t tgt_seq;
/* Count of outstanding requests. */
atomic_t reqs;
@@ -559,19 +559,33 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
unsigned long flags;
u32 queue_num;
- spin_lock_irqsave(&tgt->tgt_lock, flags);
+ local_irq_save(flags);
+ if (atomic_inc_return(&tgt->reqs) > 1) {
+ unsigned long seq;
+
+ do {
+ seq = read_seqcount_begin(&tgt->tgt_seq);
+ vq = tgt->req_vq;
+ } while (read_seqcount_retry(&tgt->tgt_seq, seq));
+ } else {
+ /* no writes can be concurrent because of atomic_t */
+ write_seqcount_begin(&tgt->tgt_seq);
+
+ /* keep previous req_vq if a reader just arrived */
+ if (unlikely(atomic_read(&tgt->reqs) > 1)) {
+ vq = tgt->req_vq;
+ goto unlock;
+ }
- if (atomic_inc_return(&tgt->reqs) > 1)
- vq = tgt->req_vq;
- else {
queue_num = smp_processor_id();
while (unlikely(queue_num >= vscsi->num_queues))
queue_num -= vscsi->num_queues;
-
tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
+ unlock:
+ write_seqcount_end(&tgt->tgt_seq);
}
+ local_irq_restore(flags);
- spin_unlock_irqrestore(&tgt->tgt_lock, flags);
return vq;
}
@@ -641,6 +655,36 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return virtscsi_tmf(vscsi, cmd);
}
+/**
+ * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
+ * @sdev: Virtscsi target whose queue depth to change
+ * @qdepth: New queue depth
+ * @reason: Reason for the queue depth change.
+ */
+static int virtscsi_change_queue_depth(struct scsi_device *sdev,
+ int qdepth,
+ int reason)
+{
+ struct Scsi_Host *shost = sdev->host;
+ int max_depth = shost->cmd_per_lun;
+
+ switch (reason) {
+ case SCSI_QDEPTH_QFULL: /* Drop qdepth in response to BUSY state */
+ scsi_track_queue_full(sdev, qdepth);
+ break;
+ case SCSI_QDEPTH_RAMP_UP: /* Raise qdepth after BUSY state resolved */
+ case SCSI_QDEPTH_DEFAULT: /* Manual change via sysfs */
+ scsi_adjust_queue_depth(sdev,
+ scsi_get_tag_type(sdev),
+ min(max_depth, qdepth));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return sdev->queue_depth;
+}
+
static int virtscsi_abort(struct scsi_cmnd *sc)
{
struct virtio_scsi *vscsi = shost_priv(sc->device->host);
@@ -667,14 +711,17 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
static int virtscsi_target_alloc(struct scsi_target *starget)
{
+ struct Scsi_Host *sh = dev_to_shost(starget->dev.parent);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+
struct virtio_scsi_target_state *tgt =
kmalloc(sizeof(*tgt), GFP_KERNEL);
if (!tgt)
return -ENOMEM;
- spin_lock_init(&tgt->tgt_lock);
+ seqcount_init(&tgt->tgt_seq);
atomic_set(&tgt->reqs, 0);
- tgt->req_vq = NULL;
+ tgt->req_vq = &vscsi->req_vqs[0];
starget->hostdata = tgt;
return 0;
@@ -693,6 +740,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
.this_id = -1,
.cmd_size = sizeof(struct virtio_scsi_cmd),
.queuecommand = virtscsi_queuecommand_single,
+ .change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
@@ -710,6 +758,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.this_id = -1,
.cmd_size = sizeof(struct virtio_scsi_cmd),
.queuecommand = virtscsi_queuecommand_multi,
+ .change_queue_depth = virtscsi_change_queue_depth,
.eh_abort_handler = virtscsi_abort,
.eh_device_reset_handler = virtscsi_device_reset,
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index c88e1468aad7..598f65efaaec 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1194,7 +1194,7 @@ static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
int ret;
- ret = pci_enable_msix(adapter->dev, &entry, 1);
+ ret = pci_enable_msix_exact(adapter->dev, &entry, 1);
if (ret)
return ret;
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 41883a87931d..c0506de4f3b6 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -502,7 +502,8 @@ wd33c93_execute(struct Scsi_Host *instance)
cmd = (struct scsi_cmnd *) hostdata->input_Q;
prev = NULL;
while (cmd) {
- if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
+ if (!(hostdata->busy[cmd->device->id] &
+ (1 << (cmd->device->lun & 0xff))))
break;
prev = cmd;
cmd = (struct scsi_cmnd *) cmd->host_scribble;
@@ -593,10 +594,10 @@ wd33c93_execute(struct Scsi_Host *instance)
write_wd33c93(regs, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
- write_wd33c93(regs, WD_TARGET_LUN, cmd->device->lun);
+ write_wd33c93(regs, WD_TARGET_LUN, (u8)cmd->device->lun);
write_wd33c93(regs, WD_SYNCHRONOUS_TRANSFER,
hostdata->sync_xfer[cmd->device->id]);
- hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] |= (1 << (cmd->device->lun & 0xFF));
if ((hostdata->level2 == L2_NONE) ||
(hostdata->sync_stat[cmd->device->id] == SS_UNSET)) {
@@ -862,7 +863,7 @@ wd33c93_intr(struct Scsi_Host *instance)
}
cmd->result = DID_NO_CONNECT << 16;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
cmd->scsi_done(cmd);
@@ -895,7 +896,7 @@ wd33c93_intr(struct Scsi_Host *instance)
/* construct an IDENTIFY message with correct disconnect bit */
- hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun);
+ hostdata->outgoing_msg[0] = IDENTIFY(0, cmd->device->lun);
if (cmd->SCp.phase)
hostdata->outgoing_msg[0] |= 0x40;
@@ -1179,7 +1180,7 @@ wd33c93_intr(struct Scsi_Host *instance)
lun = read_wd33c93(regs, WD_TARGET_LUN);
DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
hostdata->connected = NULL;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
cmd->SCp.Status = lun;
@@ -1268,7 +1269,7 @@ wd33c93_intr(struct Scsi_Host *instance)
}
DB(DB_INTR, printk("UNEXP_DISC"))
hostdata->connected = NULL;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
cmd->result =
@@ -1300,7 +1301,7 @@ wd33c93_intr(struct Scsi_Host *instance)
switch (hostdata->state) {
case S_PRE_CMP_DISC:
hostdata->connected = NULL;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
DB(DB_INTR, printk(":%d", cmd->SCp.Status))
if (cmd->cmnd[0] == REQUEST_SENSE
@@ -1353,7 +1354,7 @@ wd33c93_intr(struct Scsi_Host *instance)
if (hostdata->selecting) {
cmd = (struct scsi_cmnd *) hostdata->selecting;
hostdata->selecting = NULL;
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
cmd->host_scribble =
(uchar *) hostdata->input_Q;
hostdata->input_Q = cmd;
@@ -1365,7 +1366,7 @@ wd33c93_intr(struct Scsi_Host *instance)
if (cmd) {
if (phs == 0x00) {
hostdata->busy[cmd->device->id] &=
- ~(1 << cmd->device->lun);
+ ~(1 << (cmd->device->lun & 0xff));
cmd->host_scribble =
(uchar *) hostdata->input_Q;
hostdata->input_Q = cmd;
@@ -1448,7 +1449,7 @@ wd33c93_intr(struct Scsi_Host *instance)
cmd = (struct scsi_cmnd *) hostdata->disconnected_Q;
patch = NULL;
while (cmd) {
- if (id == cmd->device->id && lun == cmd->device->lun)
+ if (id == cmd->device->id && lun == (u8)cmd->device->lun)
break;
patch = cmd;
cmd = (struct scsi_cmnd *) cmd->host_scribble;
@@ -1459,7 +1460,7 @@ wd33c93_intr(struct Scsi_Host *instance)
if (!cmd) {
printk
("---TROUBLE: target %d.%d not in disconnect queue---",
- id, lun);
+ id, (u8)lun);
spin_unlock_irqrestore(&hostdata->lock, flags);
return;
}
@@ -1705,7 +1706,7 @@ wd33c93_abort(struct scsi_cmnd * cmd)
sr = read_wd33c93(regs, WD_SCSI_STATUS);
printk("asr=%02x, sr=%02x.", asr, sr);
- hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
+ hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->connected = NULL;
hostdata->state = S_UNCONNECTED;
cmd->result = DID_ABORT << 16;
@@ -2169,7 +2170,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "\nconnected: ");
if (hd->connected) {
cmd = (struct scsi_cmnd *) hd->connected;
- seq_printf(m, " %d:%d(%02x)",
+ seq_printf(m, " %d:%llu(%02x)",
cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
}
}
@@ -2177,7 +2178,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "\ninput_Q: ");
cmd = (struct scsi_cmnd *) hd->input_Q;
while (cmd) {
- seq_printf(m, " %d:%d(%02x)",
+ seq_printf(m, " %d:%llu(%02x)",
cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
cmd = (struct scsi_cmnd *) cmd->host_scribble;
}
@@ -2186,7 +2187,7 @@ wd33c93_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "\ndisconnected_Q:");
cmd = (struct scsi_cmnd *) hd->disconnected_Q;
while (cmd) {
- seq_printf(m, " %d:%d(%02x)",
+ seq_printf(m, " %d:%llu(%02x)",
cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
cmd = (struct scsi_cmnd *) cmd->host_scribble;
}
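All of the wd33c93 hunks above follow one pattern: the SCSI core's LUN is now a 64-bit value, so it is masked or cast down before being used as a shift count for the per-target busy bitmap or printed with %llu. A compact helper expressing that shift pattern, illustrative only and not part of the driver; this old parallel-SCSI driver only ever tracks low-numbered LUNs per target, so the shift stays in range:

/* Illustrative helper, not part of the driver. */
static inline unsigned int wd33c93_lun_mask(u64 lun)
{
        return 1u << (lun & 0xff);
}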
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 0f7c44793b29..3b1b95d932d1 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_ARCH_QCOM) += qcom/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
new file mode 100644
index 000000000000..cdaad9d53a05
--- /dev/null
+++ b/drivers/soc/tegra/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_ARCH_TEGRA) += fuse/
+
+obj-$(CONFIG_ARCH_TEGRA) += common.o
+obj-$(CONFIG_ARCH_TEGRA) += pmc.o
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
new file mode 100644
index 000000000000..a71cb74f3674
--- /dev/null
+++ b/drivers/soc/tegra/common.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+
+#include <soc/tegra/common.h>
+
+static const struct of_device_id tegra_machine_match[] = {
+ { .compatible = "nvidia,tegra20", },
+ { .compatible = "nvidia,tegra30", },
+ { .compatible = "nvidia,tegra114", },
+ { .compatible = "nvidia,tegra124", },
+ { }
+};
+
+bool soc_is_tegra(void)
+{
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root)
+ return false;
+
+ return of_match_node(tegra_machine_match, root) != NULL;
+}
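soc_is_tegra() exists so that code built into multi-platform kernels can bail out early on other SoCs; the fuse early initcall later in this diff uses it exactly that way. A minimal consumer sketch (the initcall name is hypothetical):

#include <linux/init.h>

#include <soc/tegra/common.h>

static int __init example_tegra_early_init(void)
{
        /* Nothing to do when this kernel boots on a non-Tegra machine. */
        if (!soc_is_tegra())
                return 0;

        /* ... Tegra-only early setup would go here ... */
        return 0;
}
early_initcall(example_tegra_early_init);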
diff --git a/drivers/soc/tegra/fuse/Makefile b/drivers/soc/tegra/fuse/Makefile
new file mode 100644
index 000000000000..3af357da91f3
--- /dev/null
+++ b/drivers/soc/tegra/fuse/Makefile
@@ -0,0 +1,8 @@
+obj-y += fuse-tegra.o
+obj-y += fuse-tegra30.o
+obj-y += tegra-apbmisc.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += fuse-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += speedo-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += speedo-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_114_SOC) += speedo-tegra114.o
+obj-$(CONFIG_ARCH_TEGRA_124_SOC) += speedo-tegra124.o
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
new file mode 100644
index 000000000000..11a5043959dc
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+static u32 (*fuse_readl)(const unsigned int offset);
+static int fuse_size;
+struct tegra_sku_info tegra_sku_info;
+
+static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
+ [TEGRA_REVISION_UNKNOWN] = "unknown",
+ [TEGRA_REVISION_A01] = "A01",
+ [TEGRA_REVISION_A02] = "A02",
+ [TEGRA_REVISION_A03] = "A03",
+ [TEGRA_REVISION_A03p] = "A03 prime",
+ [TEGRA_REVISION_A04] = "A04",
+};
+
+static u8 fuse_readb(const unsigned int offset)
+{
+ u32 val;
+
+ val = fuse_readl(round_down(offset, 4));
+ val >>= (offset % 4) * 8;
+ val &= 0xff;
+
+ return val;
+}
+
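fuse_readb() above pulls a single byte out of the aligned 32-bit word that contains it. As a worked example, offset 6 rounds down to the word at offset 4 and (6 % 4) * 8 = 16, so the byte is bits 23:16 of that word; an equivalent open-coded read, for illustration only:

/* Open-coded read of the byte at offset 6, matching fuse_readb(6). */
static u8 fuse_readb_offset6_example(void)
{
        u32 word = fuse_readl(4);       /* round_down(6, 4) == 4 */

        return (word >> 16) & 0xff;     /* (6 % 4) * 8 == 16 */
}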
+static ssize_t fuse_read(struct file *fd, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t size)
+{
+ int i;
+
+ if (pos < 0 || pos >= fuse_size)
+ return 0;
+
+ if (size > fuse_size - pos)
+ size = fuse_size - pos;
+
+ for (i = 0; i < size; i++)
+ buf[i] = fuse_readb(pos + i);
+
+ return i;
+}
+
+static struct bin_attribute fuse_bin_attr = {
+ .attr = { .name = "fuse", .mode = S_IRUGO, },
+ .read = fuse_read,
+};
+
+static const struct of_device_id car_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-car", },
+ { .compatible = "nvidia,tegra30-car", },
+ { .compatible = "nvidia,tegra114-car", },
+ { .compatible = "nvidia,tegra124-car", },
+ {},
+};
+
+static void tegra_enable_fuse_clk(void __iomem *base)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + 0x48);
+ reg |= 1 << 28;
+ writel(reg, base + 0x48);
+
+ /*
+ * Enable FUSE clock. This needs to be hardcoded because the clock
+ * subsystem is not active during early boot.
+ */
+ reg = readl(base + 0x14);
+ reg |= 1 << 7;
+ writel(reg, base + 0x14);
+}
+
+int tegra_fuse_readl(unsigned long offset, u32 *value)
+{
+ if (!fuse_readl)
+ return -EPROBE_DEFER;
+
+ *value = fuse_readl(offset);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_fuse_readl);
+
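tegra_fuse_readl() returns -EPROBE_DEFER until a fuse backend has registered its read callback, so consumers are expected to propagate that error from their probe path. A hedged consumer sketch; the function name and the offset 0x1f4 are placeholders, not documented fuse locations:

#include <linux/device.h>

#include <soc/tegra/fuse.h>

static int example_read_fuse(struct device *dev, u32 *value)
{
        int err;

        err = tegra_fuse_readl(0x1f4, value);   /* placeholder offset */
        if (err == -EPROBE_DEFER)
                return err;     /* fuse driver not ready yet, retry probe */
        if (err)
                dev_err(dev, "failed to read fuse: %d\n", err);

        return err;
}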
+int tegra_fuse_create_sysfs(struct device *dev, int size,
+ u32 (*readl)(const unsigned int offset))
+{
+ if (fuse_size)
+ return -ENODEV;
+
+ fuse_bin_attr.size = size;
+ fuse_bin_attr.read = fuse_read;
+
+ fuse_size = size;
+ fuse_readl = readl;
+
+ return device_create_bin_file(dev, &fuse_bin_attr);
+}
+
+static int __init tegra_init_fuse(void)
+{
+ struct device_node *np;
+ void __iomem *car_base;
+
+ if (!soc_is_tegra())
+ return 0;
+
+ tegra_init_apbmisc();
+
+ np = of_find_matching_node(NULL, car_match);
+ car_base = of_iomap(np, 0);
+ if (car_base) {
+ tegra_enable_fuse_clk(car_base);
+ iounmap(car_base);
+ } else {
+ pr_err("Could not enable fuse clk. ioremap tegra car failed.\n");
+ return -ENXIO;
+ }
+
+ if (tegra_get_chip_id() == TEGRA20)
+ tegra20_init_fuse_early();
+ else
+ tegra30_init_fuse_early();
+
+ pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
+ tegra_revision_name[tegra_sku_info.revision],
+ tegra_sku_info.sku_id, tegra_sku_info.cpu_process_id,
+ tegra_sku_info.core_process_id);
+ pr_debug("Tegra CPU Speedo ID %d, Soc Speedo ID %d\n",
+ tegra_sku_info.cpu_speedo_id, tegra_sku_info.soc_speedo_id);
+
+ return 0;
+}
+early_initcall(tegra_init_fuse);
diff --git a/drivers/soc/tegra/fuse/fuse-tegra20.c b/drivers/soc/tegra/fuse/fuse-tegra20.c
new file mode 100644
index 000000000000..7cb63ab6aac2
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse-tegra20.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Based on drivers/misc/eeprom/sunxi_sid.c
+ */
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define FUSE_BEGIN 0x100
+#define FUSE_SIZE 0x1f8
+#define FUSE_UID_LOW 0x08
+#define FUSE_UID_HIGH 0x0c
+
+static phys_addr_t fuse_phys;
+static struct clk *fuse_clk;
+static void __iomem __initdata *fuse_base;
+
+static DEFINE_MUTEX(apb_dma_lock);
+static DECLARE_COMPLETION(apb_dma_wait);
+static struct dma_chan *apb_dma_chan;
+static struct dma_slave_config dma_sconfig;
+static u32 *apb_buffer;
+static dma_addr_t apb_buffer_phys;
+
+static void apb_dma_complete(void *args)
+{
+ complete(&apb_dma_wait);
+}
+
+static u32 tegra20_fuse_readl(const unsigned int offset)
+{
+ int ret;
+ u32 val = 0;
+ struct dma_async_tx_descriptor *dma_desc;
+
+ mutex_lock(&apb_dma_lock);
+
+ dma_sconfig.src_addr = fuse_phys + FUSE_BEGIN + offset;
+ ret = dmaengine_slave_config(apb_dma_chan, &dma_sconfig);
+ if (ret)
+ goto out;
+
+ dma_desc = dmaengine_prep_slave_single(apb_dma_chan, apb_buffer_phys,
+ sizeof(u32), DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma_desc)
+ goto out;
+
+ dma_desc->callback = apb_dma_complete;
+ dma_desc->callback_param = NULL;
+
+ reinit_completion(&apb_dma_wait);
+
+ clk_prepare_enable(fuse_clk);
+
+ dmaengine_submit(dma_desc);
+ dma_async_issue_pending(apb_dma_chan);
+ ret = wait_for_completion_timeout(&apb_dma_wait, msecs_to_jiffies(50));
+
+ if (WARN(ret == 0, "apb read dma timed out"))
+ dmaengine_terminate_all(apb_dma_chan);
+ else
+ val = *apb_buffer;
+
+ clk_disable_unprepare(fuse_clk);
+out:
+ mutex_unlock(&apb_dma_lock);
+
+ return val;
+}
+
+static const struct of_device_id tegra20_fuse_of_match[] = {
+ { .compatible = "nvidia,tegra20-efuse" },
+ {},
+};
+
+static int apb_dma_init(void)
+{
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ apb_dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!apb_dma_chan)
+ return -EPROBE_DEFER;
+
+ apb_buffer = dma_alloc_coherent(NULL, sizeof(u32), &apb_buffer_phys,
+ GFP_KERNEL);
+ if (!apb_buffer) {
+ dma_release_channel(apb_dma_chan);
+ return -ENOMEM;
+ }
+
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = 1;
+ dma_sconfig.dst_maxburst = 1;
+
+ return 0;
+}
+
+static int tegra20_fuse_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int err;
+
+ fuse_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(fuse_clk)) {
+ dev_err(&pdev->dev, "missing clock");
+ return PTR_ERR(fuse_clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ fuse_phys = res->start;
+
+ err = apb_dma_init();
+ if (err)
+ return err;
+
+ if (tegra_fuse_create_sysfs(&pdev->dev, FUSE_SIZE, tegra20_fuse_readl))
+ return -ENODEV;
+
+ dev_dbg(&pdev->dev, "loaded\n");
+
+ return 0;
+}
+
+static struct platform_driver tegra20_fuse_driver = {
+ .probe = tegra20_fuse_probe,
+ .driver = {
+ .name = "tegra20_fuse",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra20_fuse_of_match,
+ }
+};
+
+static int __init tegra20_fuse_init(void)
+{
+ return platform_driver_register(&tegra20_fuse_driver);
+}
+postcore_initcall(tegra20_fuse_init);
+
+/* Early boot code. This code is called before the devices are created */
+
+u32 __init tegra20_fuse_early(const unsigned int offset)
+{
+ return readl_relaxed(fuse_base + FUSE_BEGIN + offset);
+}
+
+bool __init tegra20_spare_fuse_early(int spare_bit)
+{
+ u32 offset = spare_bit * 4;
+ bool value;
+
+ value = tegra20_fuse_early(offset + 0x100);
+
+ return value;
+}
+
+static void __init tegra20_fuse_add_randomness(void)
+{
+ u32 randomness[7];
+
+ randomness[0] = tegra_sku_info.sku_id;
+ randomness[1] = tegra_read_straps();
+ randomness[2] = tegra_read_chipid();
+ randomness[3] = tegra_sku_info.cpu_process_id << 16;
+ randomness[3] |= tegra_sku_info.core_process_id;
+ randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
+ randomness[4] |= tegra_sku_info.soc_speedo_id;
+ randomness[5] = tegra20_fuse_early(FUSE_UID_LOW);
+ randomness[6] = tegra20_fuse_early(FUSE_UID_HIGH);
+
+ add_device_randomness(randomness, sizeof(randomness));
+}
+
+void __init tegra20_init_fuse_early(void)
+{
+ fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);
+
+ tegra_init_revision();
+ tegra20_init_speedo_data(&tegra_sku_info);
+ tegra20_fuse_add_randomness();
+
+ iounmap(fuse_base);
+}
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
new file mode 100644
index 000000000000..5999cf34ab70
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define FUSE_BEGIN 0x100
+
+/* Tegra30 and later */
+#define FUSE_VENDOR_CODE 0x100
+#define FUSE_FAB_CODE 0x104
+#define FUSE_LOT_CODE_0 0x108
+#define FUSE_LOT_CODE_1 0x10c
+#define FUSE_WAFER_ID 0x110
+#define FUSE_X_COORDINATE 0x114
+#define FUSE_Y_COORDINATE 0x118
+
+#define FUSE_HAS_REVISION_INFO BIT(0)
+
+enum speedo_idx {
+ SPEEDO_TEGRA30 = 0,
+ SPEEDO_TEGRA114,
+ SPEEDO_TEGRA124,
+};
+
+struct tegra_fuse_info {
+ int size;
+ int spare_bit;
+ enum speedo_idx speedo_idx;
+};
+
+static void __iomem *fuse_base;
+static struct clk *fuse_clk;
+static struct tegra_fuse_info *fuse_info;
+
+u32 tegra30_fuse_readl(const unsigned int offset)
+{
+ u32 val;
+
+ /*
+ * early in the boot, the fuse clock will be enabled by
+ * tegra_init_fuse()
+ */
+
+ if (fuse_clk)
+ clk_prepare_enable(fuse_clk);
+
+ val = readl_relaxed(fuse_base + FUSE_BEGIN + offset);
+
+ if (fuse_clk)
+ clk_disable_unprepare(fuse_clk);
+
+ return val;
+}
+
+static struct tegra_fuse_info tegra30_info = {
+ .size = 0x2a4,
+ .spare_bit = 0x144,
+ .speedo_idx = SPEEDO_TEGRA30,
+};
+
+static struct tegra_fuse_info tegra114_info = {
+ .size = 0x2a0,
+ .speedo_idx = SPEEDO_TEGRA114,
+};
+
+static struct tegra_fuse_info tegra124_info = {
+ .size = 0x300,
+ .speedo_idx = SPEEDO_TEGRA124,
+};
+
+static const struct of_device_id tegra30_fuse_of_match[] = {
+ { .compatible = "nvidia,tegra30-efuse", .data = &tegra30_info },
+ { .compatible = "nvidia,tegra114-efuse", .data = &tegra114_info },
+ { .compatible = "nvidia,tegra124-efuse", .data = &tegra124_info },
+ {},
+};
+
+static int tegra30_fuse_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_dev_id;
+
+ of_dev_id = of_match_device(tegra30_fuse_of_match, &pdev->dev);
+ if (!of_dev_id)
+ return -ENODEV;
+
+ fuse_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(fuse_clk)) {
+ dev_err(&pdev->dev, "missing clock");
+ return PTR_ERR(fuse_clk);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+
+ if (tegra_fuse_create_sysfs(&pdev->dev, fuse_info->size,
+ tegra30_fuse_readl))
+ return -ENODEV;
+
+ dev_dbg(&pdev->dev, "loaded\n");
+
+ return 0;
+}
+
+static struct platform_driver tegra30_fuse_driver = {
+ .probe = tegra30_fuse_probe,
+ .driver = {
+ .name = "tegra_fuse",
+ .owner = THIS_MODULE,
+ .of_match_table = tegra30_fuse_of_match,
+ }
+};
+
+static int __init tegra30_fuse_init(void)
+{
+ return platform_driver_register(&tegra30_fuse_driver);
+}
+postcore_initcall(tegra30_fuse_init);
+
+/* Early boot code. This code is called before the devices are created */
+
+typedef void (*speedo_f)(struct tegra_sku_info *sku_info);
+
+static speedo_f __initdata speedo_tbl[] = {
+ [SPEEDO_TEGRA30] = tegra30_init_speedo_data,
+ [SPEEDO_TEGRA114] = tegra114_init_speedo_data,
+ [SPEEDO_TEGRA124] = tegra124_init_speedo_data,
+};
+
+static void __init tegra30_fuse_add_randomness(void)
+{
+ u32 randomness[12];
+
+ randomness[0] = tegra_sku_info.sku_id;
+ randomness[1] = tegra_read_straps();
+ randomness[2] = tegra_read_chipid();
+ randomness[3] = tegra_sku_info.cpu_process_id << 16;
+ randomness[3] |= tegra_sku_info.core_process_id;
+ randomness[4] = tegra_sku_info.cpu_speedo_id << 16;
+ randomness[4] |= tegra_sku_info.soc_speedo_id;
+ randomness[5] = tegra30_fuse_readl(FUSE_VENDOR_CODE);
+ randomness[6] = tegra30_fuse_readl(FUSE_FAB_CODE);
+ randomness[7] = tegra30_fuse_readl(FUSE_LOT_CODE_0);
+ randomness[8] = tegra30_fuse_readl(FUSE_LOT_CODE_1);
+ randomness[9] = tegra30_fuse_readl(FUSE_WAFER_ID);
+ randomness[10] = tegra30_fuse_readl(FUSE_X_COORDINATE);
+ randomness[11] = tegra30_fuse_readl(FUSE_Y_COORDINATE);
+
+ add_device_randomness(randomness, sizeof(randomness));
+}
+
+static void __init legacy_fuse_init(void)
+{
+ switch (tegra_get_chip_id()) {
+ case TEGRA30:
+ fuse_info = &tegra30_info;
+ break;
+ case TEGRA114:
+ fuse_info = &tegra114_info;
+ break;
+ case TEGRA124:
+ fuse_info = &tegra124_info;
+ break;
+ default:
+ return;
+ }
+
+ fuse_base = ioremap(TEGRA_FUSE_BASE, TEGRA_FUSE_SIZE);
+}
+
+bool __init tegra30_spare_fuse(int spare_bit)
+{
+ u32 offset = fuse_info->spare_bit + spare_bit * 4;
+
+ return tegra30_fuse_readl(offset) & 1;
+}
+
+void __init tegra30_init_fuse_early(void)
+{
+ struct device_node *np;
+ const struct of_device_id *of_match;
+
+ np = of_find_matching_node_and_match(NULL, tegra30_fuse_of_match,
+ &of_match);
+ if (np) {
+ fuse_base = of_iomap(np, 0);
+ fuse_info = (struct tegra_fuse_info *)of_match->data;
+ } else
+ legacy_fuse_init();
+
+ if (!fuse_base) {
+ pr_warn("fuse DT node missing and unknown chip id: 0x%02x\n",
+ tegra_get_chip_id());
+ return;
+ }
+
+ tegra_init_revision();
+ speedo_tbl[fuse_info->speedo_idx](&tegra_sku_info);
+ tegra30_fuse_add_randomness();
+}
diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
new file mode 100644
index 000000000000..3a398bf3572c
--- /dev/null
+++ b/drivers/soc/tegra/fuse/fuse.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_MISC_TEGRA_FUSE_H
+#define __DRIVERS_MISC_TEGRA_FUSE_H
+
+#define TEGRA_FUSE_BASE 0x7000f800
+#define TEGRA_FUSE_SIZE 0x400
+
+int tegra_fuse_create_sysfs(struct device *dev, int size,
+ u32 (*readl)(const unsigned int offset));
+
+bool tegra30_spare_fuse(int bit);
+u32 tegra30_fuse_readl(const unsigned int offset);
+void tegra30_init_fuse_early(void);
+void tegra_init_revision(void);
+void tegra_init_apbmisc(void);
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+void tegra20_init_speedo_data(struct tegra_sku_info *sku_info);
+bool tegra20_spare_fuse_early(int spare_bit);
+void tegra20_init_fuse_early(void);
+u32 tegra20_fuse_early(const unsigned int offset);
+#else
+static inline void tegra20_init_speedo_data(struct tegra_sku_info *sku_info) {}
+static inline bool tegra20_spare_fuse_early(int spare_bit)
+{
+ return false;
+}
+static inline void tegra20_init_fuse_early(void) {}
+static inline u32 tegra20_fuse_early(const unsigned int offset)
+{
+ return 0;
+}
+#endif
+
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+void tegra30_init_speedo_data(struct tegra_sku_info *sku_info);
+#else
+static inline void tegra30_init_speedo_data(struct tegra_sku_info *sku_info) {}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_114_SOC
+void tegra114_init_speedo_data(struct tegra_sku_info *sku_info);
+#else
+static inline void tegra114_init_speedo_data(struct tegra_sku_info *sku_info) {}
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+void tegra124_init_speedo_data(struct tegra_sku_info *sku_info);
+#else
+static inline void tegra124_init_speedo_data(struct tegra_sku_info *sku_info) {}
+#endif
+
+#endif
diff --git a/drivers/soc/tegra/fuse/speedo-tegra114.c b/drivers/soc/tegra/fuse/speedo-tegra114.c
new file mode 100644
index 000000000000..2a6ca036f09f
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra114.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CORE_PROCESS_CORNERS 2
+#define CPU_PROCESS_CORNERS 2
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+ {1123, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ {1695, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+ int *threshold)
+{
+ u32 tmp;
+ u32 sku = sku_info->sku_id;
+ enum tegra_revision rev = sku_info->revision;
+
+ switch (sku) {
+ case 0x00:
+ case 0x10:
+ case 0x05:
+ case 0x06:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+ break;
+
+ case 0x03:
+ case 0x04:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 1;
+ *threshold = THRESHOLD_INDEX_1;
+ break;
+
+ default:
+ pr_err("Tegra Unknown SKU %d\n", sku);
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+ break;
+ }
+
+ if (rev == TEGRA_REVISION_A01) {
+ tmp = tegra30_fuse_readl(0x270) << 1;
+ tmp |= tegra30_fuse_readl(0x26c);
+ if (!tmp)
+ sku_info->cpu_speedo_id = 0;
+ }
+}
+
+void __init tegra114_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ u32 cpu_speedo_val;
+ u32 core_speedo_val;
+ int threshold;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ rev_sku_to_speedo_ids(sku_info, &threshold);
+
+ cpu_speedo_val = tegra30_fuse_readl(0x12c) + 1024;
+ core_speedo_val = tegra30_fuse_readl(0x134);
+
+ for (i = 0; i < CPU_PROCESS_CORNERS; i++)
+ if (cpu_speedo_val < cpu_process_speedos[threshold][i])
+ break;
+ sku_info->cpu_process_id = i;
+
+ for (i = 0; i < CORE_PROCESS_CORNERS; i++)
+ if (core_speedo_val < core_process_speedos[threshold][i])
+ break;
+ sku_info->core_process_id = i;
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra124.c b/drivers/soc/tegra/fuse/speedo-tegra124.c
new file mode 100644
index 000000000000..46362387d974
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra124.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_PROCESS_CORNERS 2
+#define GPU_PROCESS_CORNERS 2
+#define CORE_PROCESS_CORNERS 2
+
+#define FUSE_CPU_SPEEDO_0 0x14
+#define FUSE_CPU_SPEEDO_1 0x2c
+#define FUSE_CPU_SPEEDO_2 0x30
+#define FUSE_SOC_SPEEDO_0 0x34
+#define FUSE_SOC_SPEEDO_1 0x38
+#define FUSE_SOC_SPEEDO_2 0x3c
+#define FUSE_CPU_IDDQ 0x18
+#define FUSE_SOC_IDDQ 0x40
+#define FUSE_GPU_IDDQ 0x128
+#define FUSE_FT_REV 0x28
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ {2190, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static const u32 __initconst gpu_process_speedos[][GPU_PROCESS_CORNERS] = {
+ {1965, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+ {2101, UINT_MAX},
+ {0, UINT_MAX},
+};
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
+ int *threshold)
+{
+ int sku = sku_info->sku_id;
+
+ /* Assign to default */
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 0;
+ *threshold = THRESHOLD_INDEX_0;
+
+ switch (sku) {
+ case 0x00: /* Eng sku */
+ case 0x0F:
+ case 0x23:
+ /* Using the default */
+ break;
+ case 0x83:
+ sku_info->cpu_speedo_id = 2;
+ break;
+
+ case 0x1F:
+ case 0x87:
+ case 0x27:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 0;
+ sku_info->gpu_speedo_id = 1;
+ *threshold = THRESHOLD_INDEX_0;
+ break;
+ case 0x81:
+ case 0x21:
+ case 0x07:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 1;
+ sku_info->gpu_speedo_id = 1;
+ *threshold = THRESHOLD_INDEX_1;
+ break;
+ case 0x49:
+ case 0x4A:
+ case 0x48:
+ sku_info->cpu_speedo_id = 4;
+ sku_info->soc_speedo_id = 2;
+ sku_info->gpu_speedo_id = 3;
+ *threshold = THRESHOLD_INDEX_1;
+ break;
+ default:
+ pr_err("Tegra Unknown SKU %d\n", sku);
+ /* Using the default for the error case */
+ break;
+ }
+}
+
+void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ int i, threshold, cpu_speedo_0_value, soc_speedo_0_value;
+ int cpu_iddq_value, gpu_iddq_value, soc_iddq_value;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(gpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+ cpu_speedo_0_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_0);
+
+ /* GPU Speedo is stored in CPU_SPEEDO_2 */
+ sku_info->gpu_speedo_value = tegra30_fuse_readl(FUSE_CPU_SPEEDO_2);
+
+ soc_speedo_0_value = tegra30_fuse_readl(FUSE_SOC_SPEEDO_0);
+
+ cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ);
+ soc_iddq_value = tegra30_fuse_readl(FUSE_SOC_IDDQ);
+ gpu_iddq_value = tegra30_fuse_readl(FUSE_GPU_IDDQ);
+
+ sku_info->cpu_speedo_value = cpu_speedo_0_value;
+
+ if (sku_info->cpu_speedo_value == 0) {
+ pr_warn("Tegra Warning: Speedo value not fused.\n");
+ WARN_ON(1);
+ return;
+ }
+
+ rev_sku_to_speedo_ids(sku_info, &threshold);
+
+ sku_info->cpu_iddq_value = tegra30_fuse_readl(FUSE_CPU_IDDQ);
+
+ for (i = 0; i < GPU_PROCESS_CORNERS; i++)
+ if (sku_info->gpu_speedo_value <
+ gpu_process_speedos[threshold][i])
+ break;
+ sku_info->gpu_process_id = i;
+
+ for (i = 0; i < CPU_PROCESS_CORNERS; i++)
+ if (sku_info->cpu_speedo_value <
+ cpu_process_speedos[threshold][i])
+ break;
+ sku_info->cpu_process_id = i;
+
+ for (i = 0; i < CORE_PROCESS_CORNERS; i++)
+ if (soc_speedo_0_value <
+ core_process_speedos[threshold][i])
+ break;
+ sku_info->core_process_id = i;
+
+ pr_debug("Tegra GPU Speedo ID=%d, Speedo Value=%d\n",
+ sku_info->gpu_speedo_id, sku_info->gpu_speedo_value);
+}
diff --git a/drivers/soc/tegra/fuse/speedo-tegra20.c b/drivers/soc/tegra/fuse/speedo-tegra20.c
new file mode 100644
index 000000000000..eff1b63f330d
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra20.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CPU_SPEEDO_LSBIT 20
+#define CPU_SPEEDO_MSBIT 29
+#define CPU_SPEEDO_REDUND_LSBIT 30
+#define CPU_SPEEDO_REDUND_MSBIT 39
+#define CPU_SPEEDO_REDUND_OFFS (CPU_SPEEDO_REDUND_MSBIT - CPU_SPEEDO_MSBIT)
+
+#define CORE_SPEEDO_LSBIT 40
+#define CORE_SPEEDO_MSBIT 47
+#define CORE_SPEEDO_REDUND_LSBIT 48
+#define CORE_SPEEDO_REDUND_MSBIT 55
+#define CORE_SPEEDO_REDUND_OFFS (CORE_SPEEDO_REDUND_MSBIT - CORE_SPEEDO_MSBIT)
+
+#define SPEEDO_MULT 4
+
+#define PROCESS_CORNERS_NUM 4
+
+#define SPEEDO_ID_SELECT_0(rev) ((rev) <= 2)
+#define SPEEDO_ID_SELECT_1(sku) \
+ (((sku) != 20) && ((sku) != 23) && ((sku) != 24) && \
+ ((sku) != 27) && ((sku) != 28))
+
+enum {
+ SPEEDO_ID_0,
+ SPEEDO_ID_1,
+ SPEEDO_ID_2,
+ SPEEDO_ID_COUNT,
+};
+
+static const u32 __initconst cpu_process_speedos[][PROCESS_CORNERS_NUM] = {
+ {315, 366, 420, UINT_MAX},
+ {303, 368, 419, UINT_MAX},
+ {316, 331, 383, UINT_MAX},
+};
+
+static const u32 __initconst core_process_speedos[][PROCESS_CORNERS_NUM] = {
+ {165, 195, 224, UINT_MAX},
+ {165, 195, 224, UINT_MAX},
+ {165, 195, 224, UINT_MAX},
+};
+
+void __init tegra20_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ u32 reg;
+ u32 val;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) != SPEEDO_ID_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) != SPEEDO_ID_COUNT);
+
+ if (SPEEDO_ID_SELECT_0(sku_info->revision))
+ sku_info->soc_speedo_id = SPEEDO_ID_0;
+ else if (SPEEDO_ID_SELECT_1(sku_info->sku_id))
+ sku_info->soc_speedo_id = SPEEDO_ID_1;
+ else
+ sku_info->soc_speedo_id = SPEEDO_ID_2;
+
+ val = 0;
+ for (i = CPU_SPEEDO_MSBIT; i >= CPU_SPEEDO_LSBIT; i--) {
+ reg = tegra20_spare_fuse_early(i) |
+ tegra20_spare_fuse_early(i + CPU_SPEEDO_REDUND_OFFS);
+ val = (val << 1) | (reg & 0x1);
+ }
+ val = val * SPEEDO_MULT;
+ pr_debug("Tegra CPU speedo value %u\n", val);
+
+ for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
+ if (val <= cpu_process_speedos[sku_info->soc_speedo_id][i])
+ break;
+ }
+ sku_info->cpu_process_id = i;
+
+ val = 0;
+ for (i = CORE_SPEEDO_MSBIT; i >= CORE_SPEEDO_LSBIT; i--) {
+ reg = tegra20_spare_fuse_early(i) |
+ tegra20_spare_fuse_early(i + CORE_SPEEDO_REDUND_OFFS);
+ val = (val << 1) | (reg & 0x1);
+ }
+ val = val * SPEEDO_MULT;
+ pr_debug("Core speedo value %u\n", val);
+
+ for (i = 0; i < (PROCESS_CORNERS_NUM - 1); i++) {
+ if (val <= core_process_speedos[sku_info->soc_speedo_id][i])
+ break;
+ }
+ sku_info->core_process_id = i;
+}
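The two loops above rebuild a speedo value one spare fuse bit at a time, walking from MSB to LSB, ORing each bit with its redundant copy, and scaling by SPEEDO_MULT. A standalone version of that bit assembly with the fuse accessor passed in; names are illustrative:

/* bit_at() stands in for tegra20_spare_fuse_early(); illustrative only. */
static u32 example_assemble_speedo(bool (*bit_at)(int bit), int msb, int lsb,
                                   int redund_offs, u32 mult)
{
        u32 val = 0;
        int i;

        for (i = msb; i >= lsb; i--) {
                u32 bit = bit_at(i) | bit_at(i + redund_offs);

                val = (val << 1) | (bit & 0x1);
        }

        return val * mult;
}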
diff --git a/drivers/soc/tegra/fuse/speedo-tegra30.c b/drivers/soc/tegra/fuse/speedo-tegra30.c
new file mode 100644
index 000000000000..b17f0dcdfebe
--- /dev/null
+++ b/drivers/soc/tegra/fuse/speedo-tegra30.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2012-2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define CORE_PROCESS_CORNERS 1
+#define CPU_PROCESS_CORNERS 6
+
+#define FUSE_SPEEDO_CALIB_0 0x14
+#define FUSE_PACKAGE_INFO 0xfc
+#define FUSE_TEST_PROG_VER 0x28
+
+#define G_SPEEDO_BIT_MINUS1 58
+#define G_SPEEDO_BIT_MINUS1_R 59
+#define G_SPEEDO_BIT_MINUS2 60
+#define G_SPEEDO_BIT_MINUS2_R 61
+#define LP_SPEEDO_BIT_MINUS1 62
+#define LP_SPEEDO_BIT_MINUS1_R 63
+#define LP_SPEEDO_BIT_MINUS2 64
+#define LP_SPEEDO_BIT_MINUS2_R 65
+
+enum {
+ THRESHOLD_INDEX_0,
+ THRESHOLD_INDEX_1,
+ THRESHOLD_INDEX_2,
+ THRESHOLD_INDEX_3,
+ THRESHOLD_INDEX_4,
+ THRESHOLD_INDEX_5,
+ THRESHOLD_INDEX_6,
+ THRESHOLD_INDEX_7,
+ THRESHOLD_INDEX_8,
+ THRESHOLD_INDEX_9,
+ THRESHOLD_INDEX_10,
+ THRESHOLD_INDEX_11,
+ THRESHOLD_INDEX_COUNT,
+};
+
+static const u32 __initconst core_process_speedos[][CORE_PROCESS_CORNERS] = {
+ {180},
+ {170},
+ {195},
+ {180},
+ {168},
+ {192},
+ {180},
+ {170},
+ {195},
+ {180},
+ {180},
+ {180},
+};
+
+static const u32 __initconst cpu_process_speedos[][CPU_PROCESS_CORNERS] = {
+ {306, 338, 360, 376, UINT_MAX},
+ {295, 336, 358, 375, UINT_MAX},
+ {325, 325, 358, 375, UINT_MAX},
+ {325, 325, 358, 375, UINT_MAX},
+ {292, 324, 348, 364, UINT_MAX},
+ {324, 324, 348, 364, UINT_MAX},
+ {324, 324, 348, 364, UINT_MAX},
+ {295, 336, 358, 375, UINT_MAX},
+ {358, 358, 358, 358, 397, UINT_MAX},
+ {364, 364, 364, 364, 397, UINT_MAX},
+ {295, 336, 358, 375, 391, UINT_MAX},
+ {295, 336, 358, 375, 391, UINT_MAX},
+};
+
+static int threshold_index __initdata;
+
+static void __init fuse_speedo_calib(u32 *speedo_g, u32 *speedo_lp)
+{
+ u32 reg;
+ int ate_ver;
+ int bit_minus1;
+ int bit_minus2;
+
+ reg = tegra30_fuse_readl(FUSE_SPEEDO_CALIB_0);
+
+ *speedo_lp = (reg & 0xFFFF) * 4;
+ *speedo_g = ((reg >> 16) & 0xFFFF) * 4;
+
+ ate_ver = tegra30_fuse_readl(FUSE_TEST_PROG_VER);
+ pr_debug("Tegra ATE prog ver %d.%d\n", ate_ver/10, ate_ver%10);
+
+ if (ate_ver >= 26) {
+ bit_minus1 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra30_spare_fuse(LP_SPEEDO_BIT_MINUS2_R);
+ *speedo_lp |= (bit_minus1 << 1) | bit_minus2;
+
+ bit_minus1 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1);
+ bit_minus1 |= tegra30_spare_fuse(G_SPEEDO_BIT_MINUS1_R);
+ bit_minus2 = tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2);
+ bit_minus2 |= tegra30_spare_fuse(G_SPEEDO_BIT_MINUS2_R);
+ *speedo_g |= (bit_minus1 << 1) | bit_minus2;
+ } else {
+ *speedo_lp |= 0x3;
+ *speedo_g |= 0x3;
+ }
+}
+
+static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info)
+{
+ int package_id = tegra30_fuse_readl(FUSE_PACKAGE_INFO) & 0x0F;
+
+ switch (sku_info->revision) {
+ case TEGRA_REVISION_A01:
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ threshold_index = THRESHOLD_INDEX_0;
+ break;
+ case TEGRA_REVISION_A02:
+ case TEGRA_REVISION_A03:
+ switch (sku_info->sku_id) {
+ case 0x87:
+ case 0x82:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_1;
+ break;
+ case 0x81:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_2;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 4;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_7;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ case 0x80:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 5;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_8;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 6;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_9;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ case 0x83:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 7;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_10;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_3;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ case 0x8F:
+ sku_info->cpu_speedo_id = 8;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_11;
+ break;
+ case 0x08:
+ sku_info->cpu_speedo_id = 1;
+ sku_info->soc_speedo_id = 1;
+ threshold_index = THRESHOLD_INDEX_4;
+ break;
+ case 0x02:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_5;
+ break;
+ case 0x04:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_6;
+ break;
+ case 0:
+ switch (package_id) {
+ case 1:
+ sku_info->cpu_speedo_id = 2;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_2;
+ break;
+ case 2:
+ sku_info->cpu_speedo_id = 3;
+ sku_info->soc_speedo_id = 2;
+ threshold_index = THRESHOLD_INDEX_3;
+ break;
+ default:
+ pr_err("Tegra Unknown pkg %d\n", package_id);
+ break;
+ }
+ break;
+ default:
+ pr_warn("Tegra Unknown SKU %d\n", sku_info->sku_id);
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ threshold_index = THRESHOLD_INDEX_0;
+ break;
+ }
+ break;
+ default:
+ pr_warn("Tegra Unknown chip rev %d\n", sku_info->revision);
+ sku_info->cpu_speedo_id = 0;
+ sku_info->soc_speedo_id = 0;
+ threshold_index = THRESHOLD_INDEX_0;
+ break;
+ }
+}
+
+void __init tegra30_init_speedo_data(struct tegra_sku_info *sku_info)
+{
+ u32 cpu_speedo_val;
+ u32 core_speedo_val;
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(core_process_speedos) !=
+ THRESHOLD_INDEX_COUNT);
+
+
+ rev_sku_to_speedo_ids(sku_info);
+ fuse_speedo_calib(&cpu_speedo_val, &core_speedo_val);
+ pr_debug("Tegra CPU speedo value %u\n", cpu_speedo_val);
+ pr_debug("Tegra Core speedo value %u\n", core_speedo_val);
+
+ for (i = 0; i < CPU_PROCESS_CORNERS; i++) {
+ if (cpu_speedo_val < cpu_process_speedos[threshold_index][i])
+ break;
+ }
+ sku_info->cpu_process_id = i - 1;
+
+ if (sku_info->cpu_process_id == -1) {
+ pr_warn("Tegra CPU speedo value %3d out of range",
+ cpu_speedo_val);
+ sku_info->cpu_process_id = 0;
+ sku_info->cpu_speedo_id = 1;
+ }
+
+ for (i = 0; i < CORE_PROCESS_CORNERS; i++) {
+ if (core_speedo_val < core_process_speedos[threshold_index][i])
+ break;
+ }
+ sku_info->core_process_id = i - 1;
+
+ if (sku_info->core_process_id == -1) {
+ pr_warn("Tegra CORE speedo value %3d out of range",
+ core_speedo_val);
+ sku_info->core_process_id = 0;
+ sku_info->soc_speedo_id = 1;
+ }
+}
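The binning at the end of tegra30_init_speedo_data() walks the corner table until the measured value drops below a corner; the process ID is the index just before that corner, with -1 meaning "below the lowest corner" and triggering the out-of-range fallback. With the THRESHOLD_INDEX_1 CPU row above, {295, 336, 358, 375, UINT_MAX}, a value of 340 breaks at index 2 and bins as process ID 1, while 290 breaks at index 0 and falls back. A standalone version of that lookup; it mirrors the Tegra30 variant only, since the Tegra20/114/124 files index the table slightly differently:

static int example_bin_process_id(u32 value, const u32 *corners,
                                  unsigned int num_corners)
{
        unsigned int i;

        for (i = 0; i < num_corners; i++)
                if (value < corners[i])
                        break;

        return (int)i - 1;      /* -1: below the lowest corner */
}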
diff --git a/drivers/soc/tegra/fuse/tegra-apbmisc.c b/drivers/soc/tegra/fuse/tegra-apbmisc.c
new file mode 100644
index 000000000000..3bf5aba4caaa
--- /dev/null
+++ b/drivers/soc/tegra/fuse/tegra-apbmisc.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+#include <soc/tegra/fuse.h>
+
+#include "fuse.h"
+
+#define APBMISC_BASE 0x70000800
+#define APBMISC_SIZE 0x64
+#define FUSE_SKU_INFO 0x10
+
+static void __iomem *apbmisc_base;
+static void __iomem *strapping_base;
+
+u32 tegra_read_chipid(void)
+{
+ return readl_relaxed(apbmisc_base + 4);
+}
+
+u8 tegra_get_chip_id(void)
+{
+ if (!apbmisc_base) {
+ WARN(1, "Tegra Chip ID not yet available\n");
+ return 0;
+ }
+
+ return (tegra_read_chipid() >> 8) & 0xff;
+}
+
+u32 tegra_read_straps(void)
+{
+ if (strapping_base)
+ return readl_relaxed(strapping_base);
+ else
+ return 0;
+}
+
+static const struct of_device_id apbmisc_match[] __initconst = {
+ { .compatible = "nvidia,tegra20-apbmisc", },
+ {},
+};
+
+void __init tegra_init_revision(void)
+{
+ u32 id, chip_id, minor_rev;
+ int rev;
+
+ id = tegra_read_chipid();
+ chip_id = (id >> 8) & 0xff;
+ minor_rev = (id >> 16) & 0xf;
+
+ switch (minor_rev) {
+ case 1:
+ rev = TEGRA_REVISION_A01;
+ break;
+ case 2:
+ rev = TEGRA_REVISION_A02;
+ break;
+ case 3:
+ if (chip_id == TEGRA20 && (tegra20_spare_fuse_early(18) ||
+ tegra20_spare_fuse_early(19)))
+ rev = TEGRA_REVISION_A03p;
+ else
+ rev = TEGRA_REVISION_A03;
+ break;
+ case 4:
+ rev = TEGRA_REVISION_A04;
+ break;
+ default:
+ rev = TEGRA_REVISION_UNKNOWN;
+ }
+
+ tegra_sku_info.revision = rev;
+
+ if (chip_id == TEGRA20)
+ tegra_sku_info.sku_id = tegra20_fuse_early(FUSE_SKU_INFO);
+ else
+ tegra_sku_info.sku_id = tegra30_fuse_readl(FUSE_SKU_INFO);
+}
+
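tegra_init_revision() decodes two fields from the chip-ID register read at apbmisc_base + 4: bits 15:8 hold the chip ID and bits 19:16 the minor revision. A hypothetical raw value, used only to illustrate that layout:

/* 0x00023002 is a made-up raw value: chip ID 0x30 (TEGRA30), minor rev 2. */
static void example_decode_chipid(void)
{
        u32 id = 0x00023002;
        u8 chip_id = (id >> 8) & 0xff;          /* 0x30 */
        u8 minor_rev = (id >> 16) & 0xf;        /* 2, i.e. TEGRA_REVISION_A02 */

        (void)chip_id;
        (void)minor_rev;
}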
+void __init tegra_init_apbmisc(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, apbmisc_match);
+ apbmisc_base = of_iomap(np, 0);
+ if (!apbmisc_base) {
+ pr_warn("ioremap tegra apbmisc failed. using %08x instead\n",
+ APBMISC_BASE);
+ apbmisc_base = ioremap(APBMISC_BASE, APBMISC_SIZE);
+ }
+
+ strapping_base = of_iomap(np, 1);
+ if (!strapping_base)
+ pr_err("ioremap tegra strapping_base failed\n");
+}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
new file mode 100644
index 000000000000..a2c0ceb95f8f
--- /dev/null
+++ b/drivers/soc/tegra/pmc.c
@@ -0,0 +1,957 @@
+/*
+ * drivers/soc/tegra/pmc.c
+ *
+ * Copyright (c) 2010 Google, Inc
+ *
+ * Author:
+ * Colin Cross <ccross@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <soc/tegra/common.h>
+#include <soc/tegra/fuse.h>
+#include <soc/tegra/pmc.h>
+
+#define PMC_CNTRL 0x0
+#define PMC_CNTRL_SYSCLK_POLARITY (1 << 10) /* sys clk polarity */
+#define PMC_CNTRL_SYSCLK_OE (1 << 11) /* system clock enable */
+#define PMC_CNTRL_SIDE_EFFECT_LP0 (1 << 14) /* LP0 when CPU pwr gated */
+#define PMC_CNTRL_CPU_PWRREQ_POLARITY (1 << 15) /* CPU pwr req polarity */
+#define PMC_CNTRL_CPU_PWRREQ_OE (1 << 16) /* CPU pwr req enable */
+#define PMC_CNTRL_INTR_POLARITY (1 << 17) /* inverts INTR polarity */
+
+#define DPD_SAMPLE 0x020
+#define DPD_SAMPLE_ENABLE (1 << 0)
+#define DPD_SAMPLE_DISABLE (0 << 0)
+
+#define PWRGATE_TOGGLE 0x30
+#define PWRGATE_TOGGLE_START (1 << 8)
+
+#define REMOVE_CLAMPING 0x34
+
+#define PWRGATE_STATUS 0x38
+
+#define PMC_SCRATCH0 0x50
+#define PMC_SCRATCH0_MODE_RECOVERY (1 << 31)
+#define PMC_SCRATCH0_MODE_BOOTLOADER (1 << 30)
+#define PMC_SCRATCH0_MODE_RCM (1 << 1)
+#define PMC_SCRATCH0_MODE_MASK (PMC_SCRATCH0_MODE_RECOVERY | \
+ PMC_SCRATCH0_MODE_BOOTLOADER | \
+ PMC_SCRATCH0_MODE_RCM)
+
+#define PMC_CPUPWRGOOD_TIMER 0xc8
+#define PMC_CPUPWROFF_TIMER 0xcc
+
+#define PMC_SCRATCH41 0x140
+
+#define IO_DPD_REQ 0x1b8
+#define IO_DPD_REQ_CODE_IDLE (0 << 30)
+#define IO_DPD_REQ_CODE_OFF (1 << 30)
+#define IO_DPD_REQ_CODE_ON (2 << 30)
+#define IO_DPD_REQ_CODE_MASK (3 << 30)
+
+#define IO_DPD_STATUS 0x1bc
+#define IO_DPD2_REQ 0x1c0
+#define IO_DPD2_STATUS 0x1c4
+#define SEL_DPD_TIM 0x1c8
+
+#define GPU_RG_CNTRL 0x2d4
+
+struct tegra_pmc_soc {
+ unsigned int num_powergates;
+ const char *const *powergates;
+ unsigned int num_cpu_powergates;
+ const u8 *cpu_powergates;
+};
+
+/**
+ * struct tegra_pmc - NVIDIA Tegra PMC
+ * @base: pointer to I/O remapped register region
+ * @clk: pointer to pclk clock
+ * @soc: pointer to SoC-specific data (powergate tables)
+ * @rate: currently configured rate of pclk
+ * @suspend_mode: lowest suspend mode available
+ * @cpu_good_time: CPU power good time (in microseconds)
+ * @cpu_off_time: CPU power off time (in microseconds)
+ * @core_osc_time: core power good OSC time (in microseconds)
+ * @core_pmu_time: core power good PMU time (in microseconds)
+ * @core_off_time: core power off time (in microseconds)
+ * @corereq_high: core power request is active-high
+ * @sysclkreq_high: system clock request is active-high
+ * @combined_req: combined power request for CPU & core
+ * @cpu_pwr_good_en: CPU power good signal is enabled
+ * @lp0_vec_phys: physical base address of the LP0 warm boot code
+ * @lp0_vec_size: size of the LP0 warm boot code
+ * @powergates_lock: mutex for power gate register access
+ */
+struct tegra_pmc {
+ void __iomem *base;
+ struct clk *clk;
+
+ const struct tegra_pmc_soc *soc;
+
+ unsigned long rate;
+
+ enum tegra_suspend_mode suspend_mode;
+ u32 cpu_good_time;
+ u32 cpu_off_time;
+ u32 core_osc_time;
+ u32 core_pmu_time;
+ u32 core_off_time;
+ bool corereq_high;
+ bool sysclkreq_high;
+ bool combined_req;
+ bool cpu_pwr_good_en;
+ u32 lp0_vec_phys;
+ u32 lp0_vec_size;
+
+ struct mutex powergates_lock;
+};
+
+static struct tegra_pmc *pmc = &(struct tegra_pmc) {
+ .base = NULL,
+ .suspend_mode = TEGRA_SUSPEND_NONE,
+};
+
+static u32 tegra_pmc_readl(unsigned long offset)
+{
+ return readl(pmc->base + offset);
+}
+
+static void tegra_pmc_writel(u32 value, unsigned long offset)
+{
+ writel(value, pmc->base + offset);
+}
+
+/**
+ * tegra_powergate_set() - set the state of a partition
+ * @id: partition ID
+ * @new_state: new state of the partition
+ */
+static int tegra_powergate_set(int id, bool new_state)
+{
+ bool status;
+
+ mutex_lock(&pmc->powergates_lock);
+
+ status = tegra_pmc_readl(PWRGATE_STATUS) & (1 << id);
+
+ if (status == new_state) {
+ mutex_unlock(&pmc->powergates_lock);
+ return 0;
+ }
+
+ tegra_pmc_writel(PWRGATE_TOGGLE_START | id, PWRGATE_TOGGLE);
+
+ mutex_unlock(&pmc->powergates_lock);
+
+ return 0;
+}
+
+/**
+ * tegra_powergate_power_on() - power on partition
+ * @id: partition ID
+ */
+int tegra_powergate_power_on(int id)
+{
+ if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
+ return -EINVAL;
+
+ return tegra_powergate_set(id, true);
+}
+
+/**
+ * tegra_powergate_power_off() - power off partition
+ * @id: partition ID
+ */
+int tegra_powergate_power_off(int id)
+{
+ if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
+ return -EINVAL;
+
+ return tegra_powergate_set(id, false);
+}
+EXPORT_SYMBOL(tegra_powergate_power_off);
+
+/**
+ * tegra_powergate_is_powered() - check if partition is powered
+ * @id: partition ID
+ */
+int tegra_powergate_is_powered(int id)
+{
+ u32 status;
+
+ if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
+ return -EINVAL;
+
+ status = tegra_pmc_readl(PWRGATE_STATUS) & (1 << id);
+ return !!status;
+}
+
+/**
+ * tegra_powergate_remove_clamping() - remove power clamps for partition
+ * @id: partition ID
+ */
+int tegra_powergate_remove_clamping(int id)
+{
+ u32 mask;
+
+ if (!pmc->soc || id < 0 || id >= pmc->soc->num_powergates)
+ return -EINVAL;
+
+ /*
+ * The Tegra124 GPU has a separate register (with different semantics)
+ * to remove clamps.
+ */
+ if (tegra_get_chip_id() == TEGRA124) {
+ if (id == TEGRA_POWERGATE_3D) {
+ tegra_pmc_writel(0, GPU_RG_CNTRL);
+ return 0;
+ }
+ }
+
+ /*
+ * Tegra 2 has a bug where the PCIE and VDE clamping masks are
+ * swapped relative to the partition IDs
+ */
+ if (id == TEGRA_POWERGATE_VDEC)
+ mask = (1 << TEGRA_POWERGATE_PCIE);
+ else if (id == TEGRA_POWERGATE_PCIE)
+ mask = (1 << TEGRA_POWERGATE_VDEC);
+ else
+ mask = (1 << id);
+
+ tegra_pmc_writel(mask, REMOVE_CLAMPING);
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_powergate_remove_clamping);
+
+/**
+ * tegra_powergate_sequence_power_up() - power up partition
+ * @id: partition ID
+ * @clk: clock for partition
+ * @rst: reset for partition
+ *
+ * Must be called with clk disabled, and returns with clk enabled.
+ */
+int tegra_powergate_sequence_power_up(int id, struct clk *clk,
+ struct reset_control *rst)
+{
+ int ret;
+
+ reset_control_assert(rst);
+
+ ret = tegra_powergate_power_on(id);
+ if (ret)
+ goto err_power;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto err_clk;
+
+ usleep_range(10, 20);
+
+ ret = tegra_powergate_remove_clamping(id);
+ if (ret)
+ goto err_clamp;
+
+ usleep_range(10, 20);
+ reset_control_deassert(rst);
+
+ return 0;
+
+err_clamp:
+ clk_disable_unprepare(clk);
+err_clk:
+ tegra_powergate_power_off(id);
+err_power:
+ return ret;
+}
+EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
+
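A hedged sketch of a driver-side caller for the sequence helper above; the partition ID, clock and reset handles are placeholders obtained elsewhere. Per the comment above, the clock must be disabled on entry and is left enabled on success:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

static int example_power_up_partition(struct device *dev, int id,
                                      struct clk *clk,
                                      struct reset_control *rst)
{
        int err;

        err = tegra_powergate_sequence_power_up(id, clk, rst);
        if (err)
                dev_err(dev, "failed to power up partition %d: %d\n", id, err);

        return err;
}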
+#ifdef CONFIG_SMP
+/**
+ * tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID
+ * @cpuid: CPU partition ID
+ *
+ * Returns the partition ID corresponding to the CPU partition ID or a
+ * negative error code on failure.
+ */
+static int tegra_get_cpu_powergate_id(int cpuid)
+{
+ if (pmc->soc && cpuid > 0 && cpuid < pmc->soc->num_cpu_powergates)
+ return pmc->soc->cpu_powergates[cpuid];
+
+ return -EINVAL;
+}
+
+/**
+ * tegra_pmc_cpu_is_powered() - check if CPU partition is powered
+ * @cpuid: CPU partition ID
+ */
+bool tegra_pmc_cpu_is_powered(int cpuid)
+{
+ int id;
+
+ id = tegra_get_cpu_powergate_id(cpuid);
+ if (id < 0)
+ return false;
+
+ return tegra_powergate_is_powered(id);
+}
+
+/**
+ * tegra_pmc_cpu_power_on() - power on CPU partition
+ * @cpuid: CPU partition ID
+ */
+int tegra_pmc_cpu_power_on(int cpuid)
+{
+ int id;
+
+ id = tegra_get_cpu_powergate_id(cpuid);
+ if (id < 0)
+ return id;
+
+ return tegra_powergate_set(id, true);
+}
+
+/**
+ * tegra_pmc_cpu_remove_clamping() - remove power clamps for CPU partition
+ * @cpuid: CPU partition ID
+ */
+int tegra_pmc_cpu_remove_clamping(int cpuid)
+{
+ int id;
+
+ id = tegra_get_cpu_powergate_id(cpuid);
+ if (id < 0)
+ return id;
+
+ return tegra_powergate_remove_clamping(id);
+}
+#endif /* CONFIG_SMP */
+
+/**
+ * tegra_pmc_restart() - reboot the system
+ * @mode: which mode to reboot in
+ * @cmd: reboot command
+ */
+void tegra_pmc_restart(enum reboot_mode mode, const char *cmd)
+{
+ u32 value;
+
+ value = tegra_pmc_readl(PMC_SCRATCH0);
+ value &= ~PMC_SCRATCH0_MODE_MASK;
+
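+ /*
+ * The optional command string is typically the one passed to the
+ * reboot(2) syscall, e.g. "reboot recovery" issued from userspace.
+ */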
+ if (cmd) {
+ if (strcmp(cmd, "recovery") == 0)
+ value |= PMC_SCRATCH0_MODE_RECOVERY;
+
+ if (strcmp(cmd, "bootloader") == 0)
+ value |= PMC_SCRATCH0_MODE_BOOTLOADER;
+
+ if (strcmp(cmd, "forced-recovery") == 0)
+ value |= PMC_SCRATCH0_MODE_RCM;
+ }
+
+ tegra_pmc_writel(value, PMC_SCRATCH0);
+
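+ /*
+ * The register at offset 0 is the PMC control register; setting
+ * bit 4 requests the main SoC reset.
+ */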
+ value = tegra_pmc_readl(0);
+ value |= 0x10;
+ tegra_pmc_writel(value, 0);
+}
+
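+/*
+ * Illustrative example of the resulting debugfs output:
+ *
+ *   powergate powered
+ *  ------------------
+ *          3d     yes
+ *        venc      no
+ */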
+static int powergate_show(struct seq_file *s, void *data)
+{
+ unsigned int i;
+
+ seq_printf(s, " powergate powered\n");
+ seq_printf(s, "------------------\n");
+
+ for (i = 0; i < pmc->soc->num_powergates; i++) {
+ if (!pmc->soc->powergates[i])
+ continue;
+
+ seq_printf(s, " %9s %7s\n", pmc->soc->powergates[i],
+ tegra_powergate_is_powered(i) ? "yes" : "no");
+ }
+
+ return 0;
+}
+
+static int powergate_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, powergate_show, inode->i_private);
+}
+
+static const struct file_operations powergate_fops = {
+ .open = powergate_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int tegra_powergate_debugfs_init(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
+ &powergate_fops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int tegra_io_rail_prepare(int id, unsigned long *request,
+ unsigned long *status, unsigned int *bit)
+{
+ unsigned long rate, value;
+ struct clk *clk;
+
+ *bit = id % 32;
+
+ /*
+ * There are two sets of 30 bits to select IO rails, but bits 30 and
+ * 31 are control bits rather than IO rail selection bits.
+ */
+ if (id > 63 || *bit == 30 || *bit == 31)
+ return -EINVAL;
+
+ if (id < 32) {
+ *status = IO_DPD_STATUS;
+ *request = IO_DPD_REQ;
+ } else {
+ *status = IO_DPD2_STATUS;
+ *request = IO_DPD2_REQ;
+ }
+
+ clk = clk_get_sys(NULL, "pclk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ tegra_pmc_writel(DPD_SAMPLE_ENABLE, DPD_SAMPLE);
+
+ /* must be at least 200 ns, in APB (PCLK) clock cycles */
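+ /* e.g. a 100 MHz pclk has a 10 ns period, so 20 cycles are programmed */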
+ value = DIV_ROUND_UP(1000000000, rate);
+ value = DIV_ROUND_UP(200, value);
+ tegra_pmc_writel(value, SEL_DPD_TIM);
+
+ return 0;
+}
+
+static int tegra_io_rail_poll(unsigned long offset, unsigned long mask,
+ unsigned long val, unsigned long timeout)
+{
+ unsigned long value;
+
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while (time_after(timeout, jiffies)) {
+ value = tegra_pmc_readl(offset);
+ if ((value & mask) == val)
+ return 0;
+
+ usleep_range(250, 1000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void tegra_io_rail_unprepare(void)
+{
+ tegra_pmc_writel(DPD_SAMPLE_DISABLE, DPD_SAMPLE);
+}
+
+int tegra_io_rail_power_on(int id)
+{
+ unsigned long request, status, value;
+ unsigned int bit, mask;
+ int err;
+
+ err = tegra_io_rail_prepare(id, &request, &status, &bit);
+ if (err < 0)
+ return err;
+
+ mask = 1 << bit;
+
+ value = tegra_pmc_readl(request);
+ value |= mask;
+ value &= ~IO_DPD_REQ_CODE_MASK;
+ value |= IO_DPD_REQ_CODE_OFF;
+ tegra_pmc_writel(value, request);
+
+ err = tegra_io_rail_poll(status, mask, 0, 250);
+ if (err < 0)
+ return err;
+
+ tegra_io_rail_unprepare();
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_io_rail_power_on);
+
+int tegra_io_rail_power_off(int id)
+{
+ unsigned long request, status, value;
+ unsigned int bit, mask;
+ int err;
+
+ err = tegra_io_rail_prepare(id, &request, &status, &bit);
+ if (err < 0)
+ return err;
+
+ mask = 1 << bit;
+
+ value = tegra_pmc_readl(request);
+ value |= mask;
+ value &= ~IO_DPD_REQ_CODE_MASK;
+ value |= IO_DPD_REQ_CODE_ON;
+ tegra_pmc_writel(value, request);
+
+ err = tegra_io_rail_poll(status, mask, mask, 250);
+ if (err < 0)
+ return err;
+
+ tegra_io_rail_unprepare();
+
+ return 0;
+}
+EXPORT_SYMBOL(tegra_io_rail_power_off);
+
+#ifdef CONFIG_PM_SLEEP
+enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
+{
+ return pmc->suspend_mode;
+}
+
+void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
+{
+ if (mode < TEGRA_SUSPEND_NONE || mode >= TEGRA_MAX_SUSPEND_MODE)
+ return;
+
+ pmc->suspend_mode = mode;
+}
+
+void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
+{
+ unsigned long long rate = 0;
+ u32 value;
+
+ switch (mode) {
+ case TEGRA_SUSPEND_LP1:
+ rate = 32768;
+ break;
+
+ case TEGRA_SUSPEND_LP2:
+ rate = clk_get_rate(pmc->clk);
+ break;
+
+ default:
+ break;
+ }
+
+ if (WARN_ON_ONCE(rate == 0))
+ rate = 100000000;
+
+ if (rate != pmc->rate) {
+ u64 ticks;
+
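+ /*
+ * Convert the CPU power-good and power-off times from
+ * microseconds to clock ticks at the new rate, rounding up.
+ */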
+ ticks = pmc->cpu_good_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(ticks, PMC_CPUPWRGOOD_TIMER);
+
+ ticks = pmc->cpu_off_time * rate + USEC_PER_SEC - 1;
+ do_div(ticks, USEC_PER_SEC);
+ tegra_pmc_writel(ticks, PMC_CPUPWROFF_TIMER);
+
+ wmb();
+
+ pmc->rate = rate;
+ }
+
+ value = tegra_pmc_readl(PMC_CNTRL);
+ value &= ~PMC_CNTRL_SIDE_EFFECT_LP0;
+ value |= PMC_CNTRL_CPU_PWRREQ_OE;
+ tegra_pmc_writel(value, PMC_CNTRL);
+}
+#endif
+
+static int tegra_pmc_parse_dt(struct tegra_pmc *pmc, struct device_node *np)
+{
+ u32 value, values[2];
+
+ if (!of_property_read_u32(np, "nvidia,suspend-mode", &value)) {
+ switch (value) {
+ case 0:
+ pmc->suspend_mode = TEGRA_SUSPEND_LP0;
+ break;
+
+ case 1:
+ pmc->suspend_mode = TEGRA_SUSPEND_LP1;
+ break;
+
+ case 2:
+ pmc->suspend_mode = TEGRA_SUSPEND_LP2;
+ break;
+
+ default:
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+ break;
+ }
+ }
+
+ pmc->suspend_mode = tegra_pm_validate_suspend_mode(pmc->suspend_mode);
+
+ if (of_property_read_u32(np, "nvidia,cpu-pwr-good-time", &value))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->cpu_good_time = value;
+
+ if (of_property_read_u32(np, "nvidia,cpu-pwr-off-time", &value))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->cpu_off_time = value;
+
+ if (of_property_read_u32_array(np, "nvidia,core-pwr-good-time",
+ values, ARRAY_SIZE(values)))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->core_osc_time = values[0];
+ pmc->core_pmu_time = values[1];
+
+ if (of_property_read_u32(np, "nvidia,core-pwr-off-time", &value))
+ pmc->suspend_mode = TEGRA_SUSPEND_NONE;
+
+ pmc->core_off_time = value;
+
+ pmc->corereq_high = of_property_read_bool(np,
+ "nvidia,core-power-req-active-high");
+
+ pmc->sysclkreq_high = of_property_read_bool(np,
+ "nvidia,sys-clock-req-active-high");
+
+ pmc->combined_req = of_property_read_bool(np,
+ "nvidia,combined-power-req");
+
+ pmc->cpu_pwr_good_en = of_property_read_bool(np,
+ "nvidia,cpu-pwr-good-en");
+
+ if (of_property_read_u32_array(np, "nvidia,lp0-vec", values,
+ ARRAY_SIZE(values)))
+ if (pmc->suspend_mode == TEGRA_SUSPEND_LP0)
+ pmc->suspend_mode = TEGRA_SUSPEND_LP1;
+
+ pmc->lp0_vec_phys = values[0];
+ pmc->lp0_vec_size = values[1];
+
+ return 0;
+}
+
+static void tegra_pmc_init(struct tegra_pmc *pmc)
+{
+ u32 value;
+
+ /* Always enable CPU power request */
+ value = tegra_pmc_readl(PMC_CNTRL);
+ value |= PMC_CNTRL_CPU_PWRREQ_OE;
+ tegra_pmc_writel(value, PMC_CNTRL);
+
+ value = tegra_pmc_readl(PMC_CNTRL);
+
+ if (pmc->sysclkreq_high)
+ value &= ~PMC_CNTRL_SYSCLK_POLARITY;
+ else
+ value |= PMC_CNTRL_SYSCLK_POLARITY;
+
+ /* configure the output polarity while the request is tristated */
+ tegra_pmc_writel(value, PMC_CNTRL);
+
+ /* now enable the request */
+ value = tegra_pmc_readl(PMC_CNTRL);
+ value |= PMC_CNTRL_SYSCLK_OE;
+ tegra_pmc_writel(value, PMC_CNTRL);
+}
+
+static int tegra_pmc_probe(struct platform_device *pdev)
+{
+ void __iomem *base = pmc->base;
+ struct resource *res;
+ int err;
+
+ err = tegra_pmc_parse_dt(pmc, pdev->dev.of_node);
+ if (err < 0)
+ return err;
+
+ /* take over the memory region from the early initialization */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pmc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmc->base))
+ return PTR_ERR(pmc->base);
+
+ iounmap(base);
+
+ pmc->clk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(pmc->clk)) {
+ err = PTR_ERR(pmc->clk);
+ dev_err(&pdev->dev, "failed to get pclk: %d\n", err);
+ return err;
+ }
+
+ tegra_pmc_init(pmc);
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ err = tegra_powergate_debugfs_init();
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_pmc_suspend(struct device *dev)
+{
+ tegra_pmc_writel(virt_to_phys(tegra_resume), PMC_SCRATCH41);
+
+ return 0;
+}
+
+static int tegra_pmc_resume(struct device *dev)
+{
+ tegra_pmc_writel(0x0, PMC_SCRATCH41);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tegra_pmc_pm_ops, tegra_pmc_suspend, tegra_pmc_resume);
+
+static const char * const tegra20_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "cpu",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+};
+
+static const struct tegra_pmc_soc tegra20_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra20_powergates),
+ .powergates = tegra20_powergates,
+ .num_cpu_powergates = 0,
+ .cpu_powergates = NULL,
+};
+
+static const char * const tegra30_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "cpu0",
+ [TEGRA_POWERGATE_3D] = "3d0",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_3D1] = "3d1",
+};
+
+static const u8 tegra30_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_pmc_soc tegra30_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra30_powergates),
+ .powergates = tegra30_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra30_cpu_powergates),
+ .cpu_powergates = tegra30_cpu_powergates,
+};
+
+static const char * const tegra114_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_C1NC] = "c1nc",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+};
+
+static const u8 tegra114_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_pmc_soc tegra114_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra114_powergates),
+ .powergates = tegra114_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra114_cpu_powergates),
+ .cpu_powergates = tegra114_cpu_powergates,
+};
+
+static const char * const tegra124_powergates[] = {
+ [TEGRA_POWERGATE_CPU] = "crail",
+ [TEGRA_POWERGATE_3D] = "3d",
+ [TEGRA_POWERGATE_VENC] = "venc",
+ [TEGRA_POWERGATE_PCIE] = "pcie",
+ [TEGRA_POWERGATE_VDEC] = "vdec",
+ [TEGRA_POWERGATE_L2] = "l2",
+ [TEGRA_POWERGATE_MPE] = "mpe",
+ [TEGRA_POWERGATE_HEG] = "heg",
+ [TEGRA_POWERGATE_SATA] = "sata",
+ [TEGRA_POWERGATE_CPU1] = "cpu1",
+ [TEGRA_POWERGATE_CPU2] = "cpu2",
+ [TEGRA_POWERGATE_CPU3] = "cpu3",
+ [TEGRA_POWERGATE_CELP] = "celp",
+ [TEGRA_POWERGATE_CPU0] = "cpu0",
+ [TEGRA_POWERGATE_C0NC] = "c0nc",
+ [TEGRA_POWERGATE_C1NC] = "c1nc",
+ [TEGRA_POWERGATE_SOR] = "sor",
+ [TEGRA_POWERGATE_DIS] = "dis",
+ [TEGRA_POWERGATE_DISB] = "disb",
+ [TEGRA_POWERGATE_XUSBA] = "xusba",
+ [TEGRA_POWERGATE_XUSBB] = "xusbb",
+ [TEGRA_POWERGATE_XUSBC] = "xusbc",
+ [TEGRA_POWERGATE_VIC] = "vic",
+ [TEGRA_POWERGATE_IRAM] = "iram",
+};
+
+static const u8 tegra124_cpu_powergates[] = {
+ TEGRA_POWERGATE_CPU0,
+ TEGRA_POWERGATE_CPU1,
+ TEGRA_POWERGATE_CPU2,
+ TEGRA_POWERGATE_CPU3,
+};
+
+static const struct tegra_pmc_soc tegra124_pmc_soc = {
+ .num_powergates = ARRAY_SIZE(tegra124_powergates),
+ .powergates = tegra124_powergates,
+ .num_cpu_powergates = ARRAY_SIZE(tegra124_cpu_powergates),
+ .cpu_powergates = tegra124_cpu_powergates,
+};
+
+static const struct of_device_id tegra_pmc_match[] = {
+ { .compatible = "nvidia,tegra124-pmc", .data = &tegra124_pmc_soc },
+ { .compatible = "nvidia,tegra114-pmc", .data = &tegra114_pmc_soc },
+ { .compatible = "nvidia,tegra30-pmc", .data = &tegra30_pmc_soc },
+ { .compatible = "nvidia,tegra20-pmc", .data = &tegra20_pmc_soc },
+ { }
+};
+
+static struct platform_driver tegra_pmc_driver = {
+ .driver = {
+ .name = "tegra-pmc",
+ .suppress_bind_attrs = true,
+ .of_match_table = tegra_pmc_match,
+ .pm = &tegra_pmc_pm_ops,
+ },
+ .probe = tegra_pmc_probe,
+};
+module_platform_driver(tegra_pmc_driver);
+
+/*
+ * Early initialization to allow access to registers in the very early boot
+ * process.
+ */
+static int __init tegra_pmc_early_init(void)
+{
+ const struct of_device_id *match;
+ struct device_node *np;
+ struct resource regs;
+ bool invert;
+ u32 value;
+
+ if (!soc_is_tegra())
+ return 0;
+
+ np = of_find_matching_node_and_match(NULL, tegra_pmc_match, &match);
+ if (!np) {
+ pr_warn("PMC device node not found, disabling powergating\n");
+
+ regs.start = 0x7000e400;
+ regs.end = 0x7000e7ff;
+ regs.flags = IORESOURCE_MEM;
+
+ pr_warn("Using memory region %pR\n", &regs);
+ } else {
+ pmc->soc = match->data;
+ }
+
+ if (np) {
+ if (of_address_to_resource(np, 0, &regs) < 0) {
+ pr_err("failed to get PMC registers\n");
+ return -ENXIO;
+ }
+ }
+
+ pmc->base = ioremap_nocache(regs.start, resource_size(&regs));
+ if (!pmc->base) {
+ pr_err("failed to map PMC registers\n");
+ return -ENXIO;
+ }
+
+ mutex_init(&pmc->powergates_lock);
+
+ invert = of_property_read_bool(np, "nvidia,invert-interrupt");
+
+ value = tegra_pmc_readl(PMC_CNTRL);
+
+ if (invert)
+ value |= PMC_CNTRL_INTR_POLARITY;
+ else
+ value &= ~PMC_CNTRL_INTR_POLARITY;
+
+ tegra_pmc_writel(value, PMC_CNTRL);
+
+ return 0;
+}
+early_initcall(tegra_pmc_early_init);
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index fb61464348a1..40c3d43c9292 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -141,13 +141,13 @@ static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw)
PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO
| PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO
| PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD;
- au_sync();
+ wmb(); /* drain writebuffer */
hw->regs->psc_spievent =
PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO
| PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO
| PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD;
- au_sync();
+ wmb(); /* drain writebuffer */
}
static void au1550_spi_reset_fifos(struct au1550_spi *hw)
@@ -155,10 +155,10 @@ static void au1550_spi_reset_fifos(struct au1550_spi *hw)
u32 pcr;
hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC;
- au_sync();
+ wmb(); /* drain writebuffer */
do {
pcr = hw->regs->psc_spipcr;
- au_sync();
+ wmb(); /* drain writebuffer */
} while (pcr != 0);
}
@@ -188,9 +188,9 @@ static void au1550_spi_chipsel(struct spi_device *spi, int value)
au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
cfg = hw->regs->psc_spicfg;
- au_sync();
+ wmb(); /* drain writebuffer */
hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
- au_sync();
+ wmb(); /* drain writebuffer */
if (spi->mode & SPI_CPOL)
cfg |= PSC_SPICFG_BI;
@@ -218,10 +218,10 @@ static void au1550_spi_chipsel(struct spi_device *spi, int value)
cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz);
hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE;
- au_sync();
+ wmb(); /* drain writebuffer */
do {
stat = hw->regs->psc_spistat;
- au_sync();
+ wmb(); /* drain writebuffer */
} while ((stat & PSC_SPISTAT_DR) == 0);
if (hw->pdata->activate_cs)
@@ -252,9 +252,9 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
cfg = hw->regs->psc_spicfg;
- au_sync();
+ wmb(); /* drain writebuffer */
hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
- au_sync();
+ wmb(); /* drain writebuffer */
if (hw->usedma && bpw <= 8)
cfg &= ~PSC_SPICFG_DD_DISABLE;
@@ -268,12 +268,12 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
cfg |= au1550_spi_baudcfg(hw, hz);
hw->regs->psc_spicfg = cfg;
- au_sync();
+ wmb(); /* drain writebuffer */
if (cfg & PSC_SPICFG_DE_ENABLE) {
do {
stat = hw->regs->psc_spistat;
- au_sync();
+ wmb(); /* drain writebuffer */
} while ((stat & PSC_SPISTAT_DR) == 0);
}
@@ -396,11 +396,11 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
/* by default enable nearly all events interrupt */
hw->regs->psc_spimsk = PSC_SPIMSK_SD;
- au_sync();
+ wmb(); /* drain writebuffer */
/* start the transfer */
hw->regs->psc_spipcr = PSC_SPIPCR_MS;
- au_sync();
+ wmb(); /* drain writebuffer */
wait_for_completion(&hw->master_done);
@@ -429,7 +429,7 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
stat = hw->regs->psc_spistat;
evnt = hw->regs->psc_spievent;
- au_sync();
+ wmb(); /* drain writebuffer */
if ((stat & PSC_SPISTAT_DI) == 0) {
dev_err(hw->dev, "Unexpected IRQ!\n");
return IRQ_NONE;
@@ -484,7 +484,7 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
static void au1550_spi_rx_word_##size(struct au1550_spi *hw) \
{ \
u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask); \
- au_sync(); \
+ wmb(); /* drain writebuffer */ \
if (hw->rx) { \
*(u##size *)hw->rx = (u##size)fifoword; \
hw->rx += (size) / 8; \
@@ -504,7 +504,7 @@ static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \
if (hw->tx_count >= hw->len) \
fifoword |= PSC_SPITXRX_LC; \
hw->regs->psc_spitxrx = fifoword; \
- au_sync(); \
+ wmb(); /* drain writebuffer */ \
}
AU1550_SPI_RX_WORD(8,0xff)
@@ -539,18 +539,18 @@ static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
}
stat = hw->regs->psc_spistat;
- au_sync();
+ wmb(); /* drain writebuffer */
if (stat & PSC_SPISTAT_TF)
break;
}
/* enable event interrupts */
hw->regs->psc_spimsk = mask;
- au_sync();
+ wmb(); /* drain writebuffer */
/* start the transfer */
hw->regs->psc_spipcr = PSC_SPIPCR_MS;
- au_sync();
+ wmb(); /* drain writebuffer */
wait_for_completion(&hw->master_done);
@@ -564,7 +564,7 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
stat = hw->regs->psc_spistat;
evnt = hw->regs->psc_spievent;
- au_sync();
+ wmb(); /* drain writebuffer */
if ((stat & PSC_SPISTAT_DI) == 0) {
dev_err(hw->dev, "Unexpected IRQ!\n");
return IRQ_NONE;
@@ -594,7 +594,7 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
do {
busy = 0;
stat = hw->regs->psc_spistat;
- au_sync();
+ wmb(); /* drain writebuffer */
/*
* Take care to not let the Rx FIFO overflow.
@@ -615,7 +615,7 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
} while (busy);
hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR;
- au_sync();
+ wmb(); /* drain writebuffer */
/*
* Restart the SPI transmission in case of a transmit underflow.
@@ -634,9 +634,9 @@ static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
*/
if (evnt & PSC_SPIEVNT_TU) {
hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD;
- au_sync();
+ wmb(); /* drain writebuffer */
hw->regs->psc_spipcr = PSC_SPIPCR_MS;
- au_sync();
+ wmb(); /* drain writebuffer */
}
if (hw->rx_count >= hw->len) {
@@ -690,19 +690,19 @@ static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
/* set up the PSC for SPI mode */
hw->regs->psc_ctrl = PSC_CTRL_DISABLE;
- au_sync();
+ wmb(); /* drain writebuffer */
hw->regs->psc_sel = PSC_SEL_PS_SPIMODE;
- au_sync();
+ wmb(); /* drain writebuffer */
hw->regs->psc_spicfg = 0;
- au_sync();
+ wmb(); /* drain writebuffer */
hw->regs->psc_ctrl = PSC_CTRL_ENABLE;
- au_sync();
+ wmb(); /* drain writebuffer */
do {
stat = hw->regs->psc_spistat;
- au_sync();
+ wmb(); /* drain writebuffer */
} while ((stat & PSC_SPISTAT_SR) == 0);
@@ -717,16 +717,16 @@ static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
#endif
hw->regs->psc_spicfg = cfg;
- au_sync();
+ wmb(); /* drain writebuffer */
au1550_spi_mask_ack_all(hw);
hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE;
- au_sync();
+ wmb(); /* drain writebuffer */
do {
stat = hw->regs->psc_spistat;
- au_sync();
+ wmb(); /* drain writebuffer */
} while ((stat & PSC_SPISTAT_DR) == 0);
au1550_spi_reset_fifos(hw);
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 1c36311935d7..480133ee1eb3 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1317,19 +1317,6 @@ static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
.tx_st_done = 21,
};
-static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
- .fifo_lvl_mask = { 0x1ff, 0x7F },
- .rx_lvl_offset = 15,
- .tx_st_done = 25,
-};
-
-static struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
- .fifo_lvl_mask = { 0x7f, 0x7F },
- .rx_lvl_offset = 13,
- .tx_st_done = 21,
- .high_speed = true,
-};
-
static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
.fifo_lvl_mask = { 0x1ff, 0x7F },
.rx_lvl_offset = 15,
@@ -1362,12 +1349,6 @@ static struct platform_device_id s3c64xx_spi_driver_ids[] = {
.name = "s3c6410-spi",
.driver_data = (kernel_ulong_t)&s3c6410_spi_port_config,
}, {
- .name = "s5p64x0-spi",
- .driver_data = (kernel_ulong_t)&s5p64x0_spi_port_config,
- }, {
- .name = "s5pc100-spi",
- .driver_data = (kernel_ulong_t)&s5pc100_spi_port_config,
- }, {
.name = "s5pv210-spi",
.driver_data = (kernel_ulong_t)&s5pv210_spi_port_config,
}, {
@@ -1384,9 +1365,6 @@ static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "samsung,s3c6410-spi",
.data = (void *)&s3c6410_spi_port_config,
},
- { .compatible = "samsung,s5pc100-spi",
- .data = (void *)&s5pc100_spi_port_config,
- },
{ .compatible = "samsung,s5pv210-spi",
.data = (void *)&s5pv210_spi_port_config,
},
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 02b0379ae550..4f34dc0095b5 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -585,7 +585,6 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
- struct page **page_array_ptr;
page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
@@ -598,8 +597,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
}
tmp_area.addr = page_addr;
tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
- page_array_ptr = page;
- ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+ ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
proc->pid, page_addr);
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 47ee6c79857a..6b22106534d8 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -202,7 +202,7 @@ static const struct file_operations imx_drm_driver_fops = {
void imx_drm_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
@@ -293,10 +293,10 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
* userspace will expect to be able to access DRM at this point.
*/
list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
- ret = drm_sysfs_connector_add(connector);
+ ret = drm_connector_register(connector);
if (ret) {
dev_err(drm->dev,
- "[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n",
+ "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
connector->base.id,
connector->name, ret);
goto err_unbind;
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 5dde79418297..8ef1deb59d4a 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -351,7 +351,7 @@ cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
cfs_hash_dhead_t, dh_head);
if (dh->dh_tail != NULL) /* not empty */
- hlist_add_after(dh->dh_tail, hnode);
+ hlist_add_behind(hnode, dh->dh_tail);
else /* empty list */
hlist_add_head(hnode, &dh->dh_head);
dh->dh_tail = hnode;
@@ -406,7 +406,7 @@ cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
cfs_hash_dhead_dep_t, dd_head);
if (dh->dd_tail != NULL) /* not empty */
- hlist_add_after(dh->dd_tail, hnode);
+ hlist_add_behind(hnode, dh->dd_tail);
else /* empty list */
hlist_add_head(hnode, &dh->dd_head);
dh->dd_tail = hnode;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 2920e406030a..5729cf678765 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2065,20 +2065,16 @@ static short rtl8192_alloc_rx_desc_ring(struct net_device *dev)
int i, rx_queue_idx;
for (rx_queue_idx = 0; rx_queue_idx < MAX_RX_QUEUE; rx_queue_idx++) {
- priv->rx_ring[rx_queue_idx] = pci_alloc_consistent(priv->pdev,
- sizeof(*priv->rx_ring[rx_queue_idx]) *
- priv->rxringcount,
- &priv->rx_ring_dma[rx_queue_idx]);
-
+ priv->rx_ring[rx_queue_idx] =
+ pci_zalloc_consistent(priv->pdev,
+ sizeof(*priv->rx_ring[rx_queue_idx]) * priv->rxringcount,
+ &priv->rx_ring_dma[rx_queue_idx]);
if (!priv->rx_ring[rx_queue_idx] ||
(unsigned long)priv->rx_ring[rx_queue_idx] & 0xFF) {
RT_TRACE(COMP_ERR, "Cannot allocate RX ring\n");
return -ENOMEM;
}
- memset(priv->rx_ring[rx_queue_idx], 0,
- sizeof(*priv->rx_ring[rx_queue_idx]) *
- priv->rxringcount);
priv->rx_idx[rx_queue_idx] = 0;
for (i = 0; i < priv->rxringcount; i++) {
@@ -2118,14 +2114,13 @@ static int rtl8192_alloc_tx_desc_ring(struct net_device *dev,
dma_addr_t dma;
int i;
- ring = pci_alloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
+ ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries, &dma);
if (!ring || (unsigned long)ring & 0xFF) {
RT_TRACE(COMP_ERR, "Cannot allocate TX ring (prio = %d)\n",
prio);
return -ENOMEM;
}
- memset(ring, 0, sizeof(*ring)*entries);
priv->tx_ring[prio].desc = ring;
priv->tx_ring[prio].dma = dma;
priv->tx_ring[prio].idx = 0;
diff --git a/drivers/staging/rtl8192ee/pci.c b/drivers/staging/rtl8192ee/pci.c
index f3abbcc9f3ba..0215aef1eacc 100644
--- a/drivers/staging/rtl8192ee/pci.c
+++ b/drivers/staging/rtl8192ee/pci.c
@@ -1224,10 +1224,10 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
/* alloc tx buffer desc for new trx flow*/
if (rtlpriv->use_new_trx_flow) {
- buffer_desc = pci_alloc_consistent(rtlpci->pdev,
- sizeof(*buffer_desc) * entries,
- &buffer_desc_dma);
-
+ buffer_desc =
+ pci_zalloc_consistent(rtlpci->pdev,
+ sizeof(*buffer_desc) * entries,
+ &buffer_desc_dma);
if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
RT_TRACE(COMP_ERR, DBG_EMERG,
("Cannot allocate TX ring (prio = %d)\n",
@@ -1235,7 +1235,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
return -ENOMEM;
}
- memset(buffer_desc, 0, sizeof(*buffer_desc) * entries);
rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
@@ -1245,16 +1244,14 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
}
/* alloc dma for this ring */
- desc = pci_alloc_consistent(rtlpci->pdev,
- sizeof(*desc) * entries, &desc_dma);
-
+ desc = pci_zalloc_consistent(rtlpci->pdev, sizeof(*desc) * entries,
+ &desc_dma);
if (!desc || (unsigned long)desc & 0xFF) {
RT_TRACE(COMP_ERR, DBG_EMERG,
("Cannot allocate TX ring (prio = %d)\n", prio));
return -ENOMEM;
}
- memset(desc, 0, sizeof(*desc) * entries);
rtlpci->tx_ring[prio].desc = desc;
rtlpci->tx_ring[prio].dma = desc_dma;
@@ -1290,11 +1287,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
struct rtl_rx_buffer_desc *entry = NULL;
/* alloc dma for this ring */
rtlpci->rx_ring[rxring_idx].buffer_desc =
- pci_alloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- buffer_desc) *
- rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
+ pci_zalloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) * rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma);
if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
(unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
RT_TRACE(COMP_ERR, DBG_EMERG,
@@ -1302,10 +1297,6 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
return -ENOMEM;
}
- memset(rtlpci->rx_ring[rxring_idx].buffer_desc, 0,
- sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
- rtlpci->rxringcount);
-
/* init every desc in this ring */
rtlpci->rx_ring[rxring_idx].idx = 0;
@@ -1320,19 +1311,15 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
u8 tmp_one = 1;
/* alloc dma for this ring */
rtlpci->rx_ring[rxring_idx].desc =
- pci_alloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- desc) * rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
+ pci_zalloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) * rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma);
if (!rtlpci->rx_ring[rxring_idx].desc ||
(unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
RT_TRACE(COMP_ERR, DBG_EMERG,
("Cannot allocate RX ring\n"));
return -ENOMEM;
}
- memset(rtlpci->rx_ring[rxring_idx].desc, 0,
- sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
- rtlpci->rxringcount);
/* init every desc in this ring */
rtlpci->rx_ring[rxring_idx].idx = 0;
diff --git a/drivers/staging/rtl8821ae/pci.c b/drivers/staging/rtl8821ae/pci.c
index f9847d1fbdeb..26d7b2fc852a 100644
--- a/drivers/staging/rtl8821ae/pci.c
+++ b/drivers/staging/rtl8821ae/pci.c
@@ -1248,9 +1248,10 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
/* alloc tx buffer desc for new trx flow*/
if (rtlpriv->use_new_trx_flow) {
- buffer_desc = pci_alloc_consistent(rtlpci->pdev,
- sizeof(*buffer_desc) * entries,
- &buffer_desc_dma);
+ buffer_desc =
+ pci_zalloc_consistent(rtlpci->pdev,
+ sizeof(*buffer_desc) * entries,
+ &buffer_desc_dma);
if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
RT_TRACE(COMP_ERR, DBG_EMERG,
@@ -1259,7 +1260,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
return -ENOMEM;
}
- memset(buffer_desc, 0, sizeof(*buffer_desc) * entries);
rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
@@ -1270,8 +1270,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
}
/* alloc dma for this ring */
- desc = pci_alloc_consistent(rtlpci->pdev,
- sizeof(*desc) * entries, &desc_dma);
+ desc = pci_zalloc_consistent(rtlpci->pdev, sizeof(*desc) * entries,
+ &desc_dma);
if (!desc || (unsigned long)desc & 0xFF) {
RT_TRACE(COMP_ERR, DBG_EMERG,
@@ -1279,7 +1279,6 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
return -ENOMEM;
}
- memset(desc, 0, sizeof(*desc) * entries);
rtlpci->tx_ring[prio].desc = desc;
rtlpci->tx_ring[prio].dma = desc_dma;
@@ -1316,21 +1315,15 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
struct rtl_rx_buffer_desc *entry = NULL;
/* alloc dma for this ring */
rtlpci->rx_ring[rxring_idx].buffer_desc =
- pci_alloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- buffer_desc) *
- rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
+ pci_zalloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) * rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma);
if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
(unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate RX ring\n"));
return -ENOMEM;
}
- memset(rtlpci->rx_ring[rxring_idx].buffer_desc, 0,
- sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
- rtlpci->rxringcount);
-
/* init every desc in this ring */
rtlpci->rx_ring[rxring_idx].idx = 0;
for (i = 0; i < rtlpci->rxringcount; i++) {
@@ -1344,10 +1337,9 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
u8 tmp_one = 1;
/* alloc dma for this ring */
rtlpci->rx_ring[rxring_idx].desc =
- pci_alloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- desc) * rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
+ pci_zalloc_consistent(rtlpci->pdev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) * rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma);
if (!rtlpci->rx_ring[rxring_idx].desc ||
(unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
RT_TRACE(COMP_ERR, DBG_EMERG,
@@ -1355,10 +1347,6 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
return -ENOMEM;
}
- memset(rtlpci->rx_ring[rxring_idx].desc, 0,
- sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
- rtlpci->rxringcount);
-
/* init every desc in this ring */
rtlpci->rx_ring[rxring_idx].idx = 0;
for (i = 0; i < rtlpci->rxringcount; i++) {
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 2029b6f8ec83..e7a6ba2002ae 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -463,14 +463,14 @@ static int rtsx_control_thread(void *__dev)
else if (chip->srb->device->id) {
dev_err(&dev->pci->dev, "Bad target number (%d:%d)\n",
chip->srb->device->id,
- chip->srb->device->lun);
+ (u8)chip->srb->device->lun);
chip->srb->result = DID_BAD_TARGET << 16;
}
else if (chip->srb->device->lun > chip->max_lun) {
dev_err(&dev->pci->dev, "Bad LUN (%d:%d)\n",
chip->srb->device->id,
- chip->srb->device->lun);
+ (u8)chip->srb->device->lun);
chip->srb->result = DID_BAD_TARGET << 16;
}
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 50ece291fc6a..f35fa3dfe22c 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -1191,18 +1191,15 @@ static int slic_rspqueue_init(struct adapter *adapter)
rspq->num_pages = SLIC_RSPQ_PAGES_GB;
for (i = 0; i < rspq->num_pages; i++) {
- rspq->vaddr[i] = pci_alloc_consistent(adapter->pcidev,
- PAGE_SIZE,
- &rspq->paddr[i]);
+ rspq->vaddr[i] = pci_zalloc_consistent(adapter->pcidev,
+ PAGE_SIZE,
+ &rspq->paddr[i]);
if (!rspq->vaddr[i]) {
dev_err(&adapter->pcidev->dev,
"pci_alloc_consistent failed\n");
slic_rspqueue_free(adapter);
return -ENOMEM;
}
- /* FIXME:
- * do we really need this assertions (4K PAGE_SIZE aligned addr)? */
- memset(rspq->vaddr[i], 0, PAGE_SIZE);
if (paddrh == 0) {
slic_reg32_write(&slic_regs->slic_rbar,
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index c78d06eff7ea..0b583a37f5b3 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1111,25 +1111,17 @@ static bool device_init_rings(PSDevice pDevice)
void *vir_pool;
/*allocate all RD/TD rings a single pool*/
- vir_pool = pci_alloc_consistent(pDevice->pcid,
- pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
- pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
- pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
- pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
- &pDevice->pool_dma);
-
+ vir_pool = pci_zalloc_consistent(pDevice->pcid,
+ pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
+ pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
+ pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
+ pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc),
+ &pDevice->pool_dma);
if (vir_pool == NULL) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s : allocate desc dma memory failed\n", pDevice->dev->name);
return false;
}
- memset(vir_pool, 0,
- pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
- pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
- pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
- pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc)
- );
-
pDevice->aRD0Ring = vir_pool;
pDevice->aRD1Ring = vir_pool +
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc);
@@ -1138,13 +1130,12 @@ static bool device_init_rings(PSDevice pDevice)
pDevice->rd1_pool_dma = pDevice->rd0_pool_dma +
pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc);
- pDevice->tx0_bufs = pci_alloc_consistent(pDevice->pcid,
- pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
- pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
- CB_BEACON_BUF_SIZE +
- CB_MAX_BUF_SIZE,
- &pDevice->tx_bufs_dma0);
-
+ pDevice->tx0_bufs = pci_zalloc_consistent(pDevice->pcid,
+ pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
+ pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
+ CB_BEACON_BUF_SIZE +
+ CB_MAX_BUF_SIZE,
+ &pDevice->tx_bufs_dma0);
if (pDevice->tx0_bufs == NULL) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: allocate buf dma memory failed\n", pDevice->dev->name);
pci_free_consistent(pDevice->pcid,
@@ -1157,13 +1148,6 @@ static bool device_init_rings(PSDevice pDevice)
return false;
}
- memset(pDevice->tx0_bufs, 0,
- pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
- pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
- CB_BEACON_BUF_SIZE +
- CB_MAX_BUF_SIZE
- );
-
pDevice->td0_pool_dma = pDevice->rd1_pool_dma +
pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc);
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 8c64b8776a96..340de9d92b15 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -252,7 +252,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
struct tcm_loop_cmd *tl_cmd;
- pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+ pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
" scsi_buf_len: %u\n", sc->device->host->host_no,
sc->device->id, sc->device->channel, sc->device->lun,
sc->cmnd[0], scsi_bufflen(sc));
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 94d00df28f39..943b1dbe859a 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -312,7 +312,7 @@ static int pscsi_add_device_to_list(struct se_device *dev,
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
- pr_err("Set broken SCSI Device %d:%d:%d"
+ pr_err("Set broken SCSI Device %d:%d:%llu"
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
@@ -375,7 +375,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
int ret;
if (scsi_device_get(sd)) {
- pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return -EIO;
@@ -401,7 +401,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
return ret;
}
- pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%llu\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
return 0;
}
@@ -417,7 +417,7 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
int ret;
if (scsi_device_get(sd)) {
- pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%llu\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return -EIO;
@@ -429,7 +429,7 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
scsi_device_put(sd);
return ret;
}
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
@@ -452,7 +452,7 @@ static int pscsi_create_type_other(struct se_device *dev,
if (ret)
return ret;
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
return 0;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f9a13867cb70..693208eb9047 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -151,7 +151,7 @@ config KIRKWOOD_THERMAL
config DOVE_THERMAL
tristate "Temperature sensor on Marvell Dove SoCs"
- depends on ARCH_DOVE
+ depends on ARCH_DOVE || MACH_DOVE
depends on OF
help
Support for the Dove thermal sensor driver in the Linux thermal
@@ -243,4 +243,9 @@ depends on ARCH_EXYNOS
source "drivers/thermal/samsung/Kconfig"
endmenu
+menu "STMicroelectronics thermal drivers"
+depends on ARCH_STI && OF
+source "drivers/thermal/st/Kconfig"
+endmenu
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index de0636a57a64..31e232f84b6b 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -32,3 +32,4 @@ obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_ACPI_INT3403_THERMAL) += int3403_thermal.o
+obj-$(CONFIG_ST_THERMAL) += st/
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 84a75f89bf74..1ab0018271c5 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -305,7 +305,7 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
* @event: value showing cpufreq event for which this function invoked.
* @data: callback-specific data
*
- * Callback to highjack the notification on cpufreq policy transition.
+ * Callback to hijack the notification on cpufreq policy transition.
* Every time there is a change in policy, we will intercept and
* update the cpufreq policy with thermal constraints.
*
diff --git a/drivers/thermal/int3403_thermal.c b/drivers/thermal/int3403_thermal.c
index e93f0253f6ed..17554eeb3953 100644
--- a/drivers/thermal/int3403_thermal.c
+++ b/drivers/thermal/int3403_thermal.c
@@ -33,6 +33,10 @@
struct int3403_sensor {
struct thermal_zone_device *tzone;
unsigned long *thresholds;
+ unsigned long crit_temp;
+ int crit_trip_id;
+ unsigned long psv_temp;
+ int psv_trip_id;
};
static int sys_get_curr_temp(struct thermal_zone_device *tzone,
@@ -79,12 +83,18 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzone,
struct acpi_device *device = tzone->devdata;
struct int3403_sensor *obj = acpi_driver_data(device);
- /*
- * get_trip_temp is a mandatory callback but
- * PATx method doesn't return any value, so return
- * cached value, which was last set from user space.
- */
- *temp = obj->thresholds[trip];
+ if (trip == obj->crit_trip_id)
+ *temp = obj->crit_temp;
+ else if (trip == obj->psv_trip_id)
+ *temp = obj->psv_temp;
+ else {
+ /*
+ * get_trip_temp is a mandatory callback but
+ * PATx method doesn't return any value, so return
+ * cached value, which was last set from user space.
+ */
+ *temp = obj->thresholds[trip];
+ }
return 0;
}
@@ -92,8 +102,14 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzone,
static int sys_get_trip_type(struct thermal_zone_device *thermal,
int trip, enum thermal_trip_type *type)
{
+ struct acpi_device *device = thermal->devdata;
+ struct int3403_sensor *obj = acpi_driver_data(device);
+
/* Mandatory callback, may not mean much here */
- *type = THERMAL_TRIP_PASSIVE;
+ if (trip == obj->crit_trip_id)
+ *type = THERMAL_TRIP_CRITICAL;
+ else
+ *type = THERMAL_TRIP_PASSIVE;
return 0;
}
@@ -155,6 +171,34 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
}
}
+static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp)
+{
+ unsigned long long crt;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET);
+
+ return 0;
+}
+
+static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp)
+{
+ unsigned long long psv;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET);
+
+ return 0;
+}
+
static int acpi_int3403_add(struct acpi_device *device)
{
int result = 0;
@@ -194,6 +238,15 @@ static int acpi_int3403_add(struct acpi_device *device)
return -ENOMEM;
trip_mask = BIT(trip_cnt) - 1;
}
+
+ obj->psv_trip_id = -1;
+ if (!sys_get_trip_psv(device, &obj->psv_temp))
+ obj->psv_trip_id = trip_cnt++;
+
+ obj->crit_trip_id = -1;
+ if (!sys_get_trip_crt(device, &obj->crit_temp))
+ obj->crit_trip_id = trip_cnt++;
+
obj->tzone = thermal_zone_device_register(acpi_device_bid(device),
trip_cnt, trip_mask, device, &tzone_ops,
NULL, 0, 0);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index d7ca9f49c9cb..acbff14da3a4 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -505,6 +505,10 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
static const struct of_device_id exynos_tmu_match[] = {
{
+ .compatible = "samsung,exynos3250-tmu",
+ .data = (void *)EXYNOS3250_TMU_DRV_DATA,
+ },
+ {
.compatible = "samsung,exynos4210-tmu",
.data = (void *)EXYNOS4210_TMU_DRV_DATA,
},
@@ -677,7 +681,8 @@ static int exynos_tmu_probe(struct platform_device *pdev)
goto err_clk_sec;
}
- if (pdata->type == SOC_ARCH_EXYNOS4210 ||
+ if (pdata->type == SOC_ARCH_EXYNOS3250 ||
+ pdata->type == SOC_ARCH_EXYNOS4210 ||
pdata->type == SOC_ARCH_EXYNOS4412 ||
pdata->type == SOC_ARCH_EXYNOS5250 ||
pdata->type == SOC_ARCH_EXYNOS5260 ||
@@ -759,10 +764,10 @@ static int exynos_tmu_remove(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
- exynos_tmu_control(pdev, false);
-
exynos_unregister_thermal(data->reg_conf);
+ exynos_tmu_control(pdev, false);
+
clk_unprepare(data->clk);
if (!IS_ERR(data->clk_sec))
clk_unprepare(data->clk_sec);
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index edd08cf76729..1b4a6444ea61 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -40,7 +40,8 @@ enum calibration_mode {
};
enum soc_type {
- SOC_ARCH_EXYNOS4210 = 1,
+ SOC_ARCH_EXYNOS3250 = 1,
+ SOC_ARCH_EXYNOS4210,
SOC_ARCH_EXYNOS4412,
SOC_ARCH_EXYNOS5250,
SOC_ARCH_EXYNOS5260,
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index c1d81dcd7819..aa8e0dee2055 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -90,6 +90,95 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
};
#endif
+#if defined(CONFIG_SOC_EXYNOS3250)
+static const struct exynos_tmu_registers exynos3250_tmu_registers = {
+ .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
+ .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
+ .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
+ .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+ .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
+ .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
+ .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
+ .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
+ .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
+ .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
+ .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
+ .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
+ .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
+ .tmu_status = EXYNOS_TMU_REG_STATUS,
+ .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
+ .threshold_th0 = EXYNOS_THD_TEMP_RISE,
+ .threshold_th1 = EXYNOS_THD_TEMP_FALL,
+ .tmu_inten = EXYNOS_TMU_REG_INTEN,
+ .inten_rise0_shift = EXYNOS_TMU_INTEN_RISE0_SHIFT,
+ .inten_rise1_shift = EXYNOS_TMU_INTEN_RISE1_SHIFT,
+ .inten_rise2_shift = EXYNOS_TMU_INTEN_RISE2_SHIFT,
+ .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
+ .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
+ .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
+ .intclr_fall_shift = EXYNOS_TMU_CLEAR_FALL_INT_SHIFT,
+ .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
+ .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
+ .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
+ .emul_con = EXYNOS_EMUL_CON,
+ .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
+ .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
+ .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
+};
+
+#define EXYNOS3250_TMU_DATA \
+ .threshold_falling = 10, \
+ .trigger_levels[0] = 70, \
+ .trigger_levels[1] = 95, \
+ .trigger_levels[2] = 110, \
+ .trigger_levels[3] = 120, \
+ .trigger_enable[0] = true, \
+ .trigger_enable[1] = true, \
+ .trigger_enable[2] = true, \
+ .trigger_enable[3] = false, \
+ .trigger_type[0] = THROTTLE_ACTIVE, \
+ .trigger_type[1] = THROTTLE_ACTIVE, \
+ .trigger_type[2] = SW_TRIP, \
+ .trigger_type[3] = HW_TRIP, \
+ .max_trigger_level = 4, \
+ .gain = 8, \
+ .reference_voltage = 16, \
+ .noise_cancel_mode = 4, \
+ .cal_type = TYPE_TWO_POINT_TRIMMING, \
+ .efuse_value = 55, \
+ .min_efuse_value = 40, \
+ .max_efuse_value = 100, \
+ .first_point_trim = 25, \
+ .second_point_trim = 85, \
+ .default_temp_offset = 50, \
+ .freq_tab[0] = { \
+ .freq_clip_max = 800 * 1000, \
+ .temp_level = 70, \
+ }, \
+ .freq_tab[1] = { \
+ .freq_clip_max = 400 * 1000, \
+ .temp_level = 95, \
+ }, \
+ .freq_tab_count = 2, \
+ .registers = &exynos3250_tmu_registers, \
+ .features = (TMU_SUPPORT_EMULATION | \
+ TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
+ TMU_SUPPORT_EMUL_TIME)
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS3250)
+struct exynos_tmu_init_data const exynos3250_default_tmu_data = {
+ .tmu_data = {
+ {
+ EXYNOS3250_TMU_DATA,
+ .type = SOC_ARCH_EXYNOS3250,
+ .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
+ },
+ },
+ .tmu_count = 1,
+};
+#endif
+
#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
static const struct exynos_tmu_registers exynos4412_tmu_registers = {
.triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index d268981b65e5..f0979e598491 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -148,6 +148,13 @@
#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
#define EXYNOS5440_EFUSE_SWAP_OFFSET 8
+#if defined(CONFIG_SOC_EXYNOS3250)
+extern struct exynos_tmu_init_data const exynos3250_default_tmu_data;
+#define EXYNOS3250_TMU_DRV_DATA (&exynos3250_default_tmu_data)
+#else
+#define EXYNOS3250_TMU_DRV_DATA (NULL)
+#endif
+
#if defined(CONFIG_CPU_EXYNOS4210)
extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
#define EXYNOS4210_TMU_DRV_DATA (&exynos4210_default_tmu_data)
diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig
new file mode 100644
index 000000000000..490fdbe22eea
--- /dev/null
+++ b/drivers/thermal/st/Kconfig
@@ -0,0 +1,12 @@
+config ST_THERMAL
+ tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
+ help
+ Support for thermal sensors on STMicroelectronics STi series of SoCs.
+
+config ST_THERMAL_SYSCFG
+ select ST_THERMAL
+ tristate "STi series syscfg register access based thermal sensors"
+
+config ST_THERMAL_MEMMAP
+ select ST_THERMAL
+ tristate "STi series memory mapped access based thermal sensors"
diff --git a/drivers/thermal/st/Makefile b/drivers/thermal/st/Makefile
new file mode 100644
index 000000000000..b38878977bd8
--- /dev/null
+++ b/drivers/thermal/st/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ST_THERMAL) := st_thermal.o
+obj-$(CONFIG_ST_THERMAL_SYSCFG) += st_thermal_syscfg.o
+obj-$(CONFIG_ST_THERMAL_MEMMAP) += st_thermal_memmap.o
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
new file mode 100644
index 000000000000..90163b384660
--- /dev/null
+++ b/drivers/thermal/st/st_thermal.c
@@ -0,0 +1,313 @@
+/*
+ * ST Thermal Sensor Driver core routines
+ * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "st_thermal.h"
+
+/* The thermal framework expects temperatures in millidegrees Celsius */
+#define mcelsius(temp) ((temp) * 1000)
+
+/*
+ * Allocate the regfields that are common to the syscfg-based and
+ * memory-mapped sensors.
+ */
+int st_thermal_alloc_regfields(struct st_thermal_sensor *sensor)
+{
+ struct device *dev = sensor->dev;
+ struct regmap *regmap = sensor->regmap;
+ const struct reg_field *reg_fields = sensor->cdata->reg_fields;
+
+ sensor->dcorrect = devm_regmap_field_alloc(dev, regmap,
+ reg_fields[DCORRECT]);
+
+ sensor->overflow = devm_regmap_field_alloc(dev, regmap,
+ reg_fields[OVERFLOW]);
+
+ sensor->temp_data = devm_regmap_field_alloc(dev, regmap,
+ reg_fields[DATA]);
+
+ if (IS_ERR(sensor->dcorrect) ||
+ IS_ERR(sensor->overflow) ||
+ IS_ERR(sensor->temp_data)) {
+ dev_err(dev, "failed to allocate common regfields\n");
+ return -EINVAL;
+ }
+
+ return sensor->ops->alloc_regfields(sensor);
+}
+
+static int st_thermal_sensor_on(struct st_thermal_sensor *sensor)
+{
+ int ret;
+ struct device *dev = sensor->dev;
+
+ ret = clk_prepare_enable(sensor->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
+
+ ret = sensor->ops->power_ctrl(sensor, POWER_ON);
+ if (ret) {
+ dev_err(dev, "failed to power on sensor\n");
+ clk_disable_unprepare(sensor->clk);
+ }
+
+ return ret;
+}
+
+static int st_thermal_sensor_off(struct st_thermal_sensor *sensor)
+{
+ int ret;
+
+ ret = sensor->ops->power_ctrl(sensor, POWER_OFF);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(sensor->clk);
+
+ return 0;
+}
+
+static int st_thermal_calibration(struct st_thermal_sensor *sensor)
+{
+ int ret;
+ unsigned int val;
+ struct device *dev = sensor->dev;
+
+ /* Check if sensor calibration data is already written */
+ ret = regmap_field_read(sensor->dcorrect, &val);
+ if (ret) {
+ dev_err(dev, "failed to read calibration data\n");
+ return ret;
+ }
+
+ if (!val) {
+ /*
+ * The calibration value was not set by the bootloader, so fall
+ * back to the default calibration data.
+ */
+ ret = regmap_field_write(sensor->dcorrect,
+ sensor->cdata->calibration_val);
+ if (ret)
+ dev_err(dev, "failed to set calibration data\n");
+ }
+
+ return ret;
+}
+
+/* Callback to get the current temperature from the hardware */
+static int st_thermal_get_temp(struct thermal_zone_device *th,
+ unsigned long *temperature)
+{
+ struct st_thermal_sensor *sensor = th->devdata;
+ struct device *dev = sensor->dev;
+ unsigned int temp;
+ unsigned int overflow;
+ int ret;
+
+ ret = regmap_field_read(sensor->overflow, &overflow);
+ if (ret)
+ return ret;
+ if (overflow)
+ return -EIO;
+
+ ret = regmap_field_read(sensor->temp_data, &temp);
+ if (ret)
+ return ret;
+
+ temp += sensor->cdata->temp_adjust_val;
+ temp = mcelsius(temp);
+
+ dev_dbg(dev, "temperature: %d\n", temp);
+
+ *temperature = temp;
+
+ return 0;
+}
+
+static int st_thermal_get_trip_type(struct thermal_zone_device *th,
+ int trip, enum thermal_trip_type *type)
+{
+ struct st_thermal_sensor *sensor = th->devdata;
+ struct device *dev = sensor->dev;
+
+ switch (trip) {
+ case 0:
+ *type = THERMAL_TRIP_CRITICAL;
+ break;
+ default:
+ dev_err(dev, "invalid trip point\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int st_thermal_get_trip_temp(struct thermal_zone_device *th,
+ int trip, unsigned long *temp)
+{
+ struct st_thermal_sensor *sensor = th->devdata;
+ struct device *dev = sensor->dev;
+
+ switch (trip) {
+ case 0:
+ *temp = mcelsius(sensor->cdata->crit_temp);
+ break;
+ default:
+ dev_err(dev, "Invalid trip point\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops st_tz_ops = {
+ .get_temp = st_thermal_get_temp,
+ .get_trip_type = st_thermal_get_trip_type,
+ .get_trip_temp = st_thermal_get_trip_temp,
+};
+
+int st_thermal_register(struct platform_device *pdev,
+ const struct of_device_id *st_thermal_of_match)
+{
+ struct st_thermal_sensor *sensor;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct of_device_id *match;
+
+ int polling_delay;
+ int ret;
+
+ if (!np) {
+ dev_err(dev, "device tree node not found\n");
+ return -EINVAL;
+ }
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->dev = dev;
+
+ match = of_match_device(st_thermal_of_match, dev);
+ if (!(match && match->data))
+ return -EINVAL;
+
+ sensor->cdata = match->data;
+ if (!sensor->cdata->ops)
+ return -EINVAL;
+
+ sensor->ops = sensor->cdata->ops;
+
+ ret = sensor->ops->regmap_init(sensor);
+ if (ret)
+ return ret;
+
+ ret = st_thermal_alloc_regfields(sensor);
+ if (ret)
+ return ret;
+
+ sensor->clk = devm_clk_get(dev, "thermal");
+ if (IS_ERR(sensor->clk)) {
+ dev_err(dev, "failed to fetch clock\n");
+ return PTR_ERR(sensor->clk);
+ }
+
+ if (sensor->ops->register_enable_irq) {
+ ret = sensor->ops->register_enable_irq(sensor);
+ if (ret)
+ return ret;
+ }
+
+ ret = st_thermal_sensor_on(sensor);
+ if (ret)
+ return ret;
+
+ ret = st_thermal_calibration(sensor);
+ if (ret)
+ goto sensor_off;
+
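+ /* Poll every second only when the sensor lacks a threshold interrupt */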
+ polling_delay = sensor->ops->register_enable_irq ? 0 : 1000;
+
+ sensor->thermal_dev =
+ thermal_zone_device_register(dev_name(dev), 1, 0, sensor,
+ &st_tz_ops, NULL, 0, polling_delay);
+ if (IS_ERR(sensor->thermal_dev)) {
+ dev_err(dev, "failed to register thermal zone device\n");
+ ret = PTR_ERR(sensor->thermal_dev);
+ goto sensor_off;
+ }
+
+ platform_set_drvdata(pdev, sensor);
+
+ return 0;
+
+sensor_off:
+ st_thermal_sensor_off(sensor);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(st_thermal_register);
+
+int st_thermal_unregister(struct platform_device *pdev)
+{
+ struct st_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ st_thermal_sensor_off(sensor);
+ thermal_zone_device_unregister(sensor->thermal_dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(st_thermal_unregister);
+
+static int st_thermal_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct st_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ return st_thermal_sensor_off(sensor);
+}
+
+static int st_thermal_resume(struct device *dev)
+{
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct st_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ ret = st_thermal_sensor_on(sensor);
+ if (ret)
+ return ret;
+
+ ret = st_thermal_calibration(sensor);
+ if (ret)
+ return ret;
+
+ if (sensor->ops->enable_irq) {
+ ret = sensor->ops->enable_irq(sensor);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
+EXPORT_SYMBOL_GPL(st_thermal_pm_ops);
+
+MODULE_AUTHOR("STMicroelectronics (R&D) Limited <ajitpal.singh@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STi SoC Thermal Sensor Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/st/st_thermal.h b/drivers/thermal/st/st_thermal.h
new file mode 100644
index 000000000000..fecafbe10fa7
--- /dev/null
+++ b/drivers/thermal/st/st_thermal.h
@@ -0,0 +1,104 @@
+/*
+ * ST Thermal Sensor Driver for STi series of SoCs
+ * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STI_THERMAL_SYSCFG_H
+#define __STI_THERMAL_SYSCFG_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/thermal.h>
+
+enum st_thermal_regfield_ids {
+ INT_THRESH_HI = 0, /* Top two regfield IDs are mutually exclusive */
+ TEMP_PWR = 0,
+ DCORRECT,
+ OVERFLOW,
+ DATA,
+ INT_ENABLE,
+
+ MAX_REGFIELDS
+};
+
+/* Thermal sensor power states */
+enum st_thermal_power_state {
+ POWER_OFF = 0,
+ POWER_ON
+};
+
+struct st_thermal_sensor;
+
+/**
+ * Description of private thermal sensor ops.
+ *
+ * @power_ctrl: Function for powering on/off a sensor. Clock to the
+ * sensor is also controlled from this function.
+ * @alloc_regfields: Allocate regmap register fields, specific to a sensor.
+ * @regmap_init: Memory map the thermal register space and initialise a
+ * regmap instance, or look up the syscfg regmap.
+ * @register_enable_irq: Register an interrupt handler for a sensor and
+ * enable the critical temperature interrupt.
+ * @enable_irq: Enable the critical temperature interrupt for a sensor.
+ */
+struct st_thermal_sensor_ops {
+ int (*power_ctrl)(struct st_thermal_sensor *, enum st_thermal_power_state);
+ int (*alloc_regfields)(struct st_thermal_sensor *);
+ int (*regmap_init)(struct st_thermal_sensor *);
+ int (*register_enable_irq)(struct st_thermal_sensor *);
+ int (*enable_irq)(struct st_thermal_sensor *);
+};
+
+/**
+ * Description of thermal driver compatible data.
+ *
+ * @reg_fields: Pointer to the regfields array for a sensor.
+ * @sys_compat: Pointer to the syscon node compatible string.
+ * @ops: Pointer to private thermal ops for a sensor.
+ * @calibration_val: Default calibration value to be written to the DCORRECT
+ * register field for a sensor.
+ * @temp_adjust_val: Signed value added to the raw data read from the
+ * sensor; use a negative value to subtract from it.
+ * @crit_temp: The temperature beyond which the SoC should be shutdown
+ * to prevent damage.
+ */
+struct st_thermal_compat_data {
+ char *sys_compat;
+ const struct reg_field *reg_fields;
+ const struct st_thermal_sensor_ops *ops;
+ unsigned int calibration_val;
+ int temp_adjust_val;
+ int crit_temp;
+};
+
+struct st_thermal_sensor {
+ struct device *dev;
+ struct thermal_zone_device *thermal_dev;
+ const struct st_thermal_sensor_ops *ops;
+ const struct st_thermal_compat_data *cdata;
+ struct clk *clk;
+ struct regmap *regmap;
+ struct regmap_field *pwr;
+ struct regmap_field *dcorrect;
+ struct regmap_field *overflow;
+ struct regmap_field *temp_data;
+ struct regmap_field *int_thresh_hi;
+ struct regmap_field *int_enable;
+ int irq;
+ void __iomem *mmio_base;
+};
+
+extern int st_thermal_register(struct platform_device *pdev,
+ const struct of_device_id *st_thermal_of_match);
+extern int st_thermal_unregister(struct platform_device *pdev);
+extern const struct dev_pm_ops st_thermal_pm_ops;
+
+#endif /* __STI_THERMAL_SYSCFG_H */
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
new file mode 100644
index 000000000000..39896ce2ee00
--- /dev/null
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -0,0 +1,209 @@
+/*
+ * ST Thermal Sensor Driver for memory mapped sensors.
+ * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+
+#include "st_thermal.h"
+
+#define STIH416_MPE_CONF 0x0
+#define STIH416_MPE_STATUS 0x4
+#define STIH416_MPE_INT_THRESH 0x8
+#define STIH416_MPE_INT_EN 0xC
+
+/* Power control bits for the memory mapped thermal sensor */
+#define THERMAL_PDN BIT(4)
+#define THERMAL_SRSTN BIT(10)
+
+static const struct reg_field st_mmap_thermal_regfields[MAX_REGFIELDS] = {
+ /*
+ * According to the STIH416 MPE temp sensor data sheet -
+ * the PDN (Power Down Bit) and SRSTN (Soft Reset Bit) need to be
+ * written simultaneously for powering on and off the temperature
+ * sensor. regmap_update_bits() will be used to update the register.
+ */
+ [INT_THRESH_HI] = REG_FIELD(STIH416_MPE_INT_THRESH, 0, 7),
+ [DCORRECT] = REG_FIELD(STIH416_MPE_CONF, 5, 9),
+ [OVERFLOW] = REG_FIELD(STIH416_MPE_STATUS, 9, 9),
+ [DATA] = REG_FIELD(STIH416_MPE_STATUS, 11, 18),
+ [INT_ENABLE] = REG_FIELD(STIH416_MPE_INT_EN, 0, 0),
+};
+
+static irqreturn_t st_mmap_thermal_trip_handler(int irq, void *sdata)
+{
+ struct st_thermal_sensor *sensor = sdata;
+
+ thermal_zone_device_update(sensor->thermal_dev);
+
+ return IRQ_HANDLED;
+}
+
+/* Private ops for memory mapped thermal sensors */
+static int st_mmap_power_ctrl(struct st_thermal_sensor *sensor,
+ enum st_thermal_power_state power_state)
+{
+ const unsigned int mask = (THERMAL_PDN | THERMAL_SRSTN);
+ const unsigned int val = power_state ? mask : 0;
+
+ return regmap_update_bits(sensor->regmap, STIH416_MPE_CONF, mask, val);
+}
+
+static int st_mmap_alloc_regfields(struct st_thermal_sensor *sensor)
+{
+ struct device *dev = sensor->dev;
+ struct regmap *regmap = sensor->regmap;
+ const struct reg_field *reg_fields = sensor->cdata->reg_fields;
+
+ sensor->int_thresh_hi = devm_regmap_field_alloc(dev, regmap,
+ reg_fields[INT_THRESH_HI]);
+ sensor->int_enable = devm_regmap_field_alloc(dev, regmap,
+ reg_fields[INT_ENABLE]);
+
+ if (IS_ERR(sensor->int_thresh_hi) || IS_ERR(sensor->int_enable)) {
+ dev_err(dev, "failed to alloc mmap regfields\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int st_mmap_enable_irq(struct st_thermal_sensor *sensor)
+{
+ int ret;
+
+ /* Set upper critical threshold */
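+ /* The threshold register holds raw sensor units, so back out the adjustment */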
+ ret = regmap_field_write(sensor->int_thresh_hi,
+ sensor->cdata->crit_temp -
+ sensor->cdata->temp_adjust_val);
+ if (ret)
+ return ret;
+
+ return regmap_field_write(sensor->int_enable, 1);
+}
+
+static int st_mmap_register_enable_irq(struct st_thermal_sensor *sensor)
+{
+ struct device *dev = sensor->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ sensor->irq = platform_get_irq(pdev, 0);
+ if (sensor->irq < 0) {
+ dev_err(dev, "failed to register IRQ\n");
+ return sensor->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, sensor->irq,
+ NULL, st_mmap_thermal_trip_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ dev->driver->name, sensor);
+ if (ret) {
+ dev_err(dev, "failed to register IRQ %d\n", sensor->irq);
+ return ret;
+ }
+
+ return st_mmap_enable_irq(sensor);
+}
+
+static const struct regmap_config st_416mpe_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+static int st_mmap_regmap_init(struct st_thermal_sensor *sensor)
+{
+ struct device *dev = sensor->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "no memory resources defined\n");
+ return -ENODEV;
+ }
+
+ sensor->mmio_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(sensor->mmio_base)) {
+ dev_err(dev, "failed to remap IO\n");
+ return PTR_ERR(sensor->mmio_base);
+ }
+
+ sensor->regmap = devm_regmap_init_mmio(dev, sensor->mmio_base,
+ &st_416mpe_regmap_config);
+ if (IS_ERR(sensor->regmap)) {
+ dev_err(dev, "failed to initialise regmap\n");
+ return PTR_ERR(sensor->regmap);
+ }
+
+ return 0;
+}
+
+static const struct st_thermal_sensor_ops st_mmap_sensor_ops = {
+ .power_ctrl = st_mmap_power_ctrl,
+ .alloc_regfields = st_mmap_alloc_regfields,
+ .regmap_init = st_mmap_regmap_init,
+ .register_enable_irq = st_mmap_register_enable_irq,
+ .enable_irq = st_mmap_enable_irq,
+};
+
+/* Compatible device data for stih416 mpe thermal sensor */
+const struct st_thermal_compat_data st_416mpe_cdata = {
+ .reg_fields = st_mmap_thermal_regfields,
+ .ops = &st_mmap_sensor_ops,
+ .calibration_val = 14,
+ .temp_adjust_val = -95,
+ .crit_temp = 120,
+};
+
+/* Compatible device data for stih407 thermal sensor */
+const struct st_thermal_compat_data st_407_cdata = {
+ .reg_fields = st_mmap_thermal_regfields,
+ .ops = &st_mmap_sensor_ops,
+ .calibration_val = 16,
+ .temp_adjust_val = -95,
+ .crit_temp = 120,
+};
+
+static struct of_device_id st_mmap_thermal_of_match[] = {
+ { .compatible = "st,stih416-mpe-thermal", .data = &st_416mpe_cdata },
+ { .compatible = "st,stih407-thermal", .data = &st_407_cdata },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, st_mmap_thermal_of_match);
+
+int st_mmap_probe(struct platform_device *pdev)
+{
+ return st_thermal_register(pdev, st_mmap_thermal_of_match);
+}
+
+int st_mmap_remove(struct platform_device *pdev)
+{
+ return st_thermal_unregister(pdev);
+}
+
+static struct platform_driver st_mmap_thermal_driver = {
+ .driver = {
+ .name = "st_thermal_mmap",
+ .owner = THIS_MODULE,
+ .pm = &st_thermal_pm_ops,
+ .of_match_table = st_mmap_thermal_of_match,
+ },
+ .probe = st_mmap_probe,
+ .remove = st_mmap_remove,
+};
+
+module_platform_driver(st_mmap_thermal_driver);
+
+MODULE_AUTHOR("STMicroelectronics (R&D) Limited <ajitpal.singh@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STi SoC Thermal Sensor Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/st/st_thermal_syscfg.c b/drivers/thermal/st/st_thermal_syscfg.c
new file mode 100644
index 000000000000..888b58e64090
--- /dev/null
+++ b/drivers/thermal/st/st_thermal_syscfg.c
@@ -0,0 +1,179 @@
+/*
+ * ST Thermal Sensor Driver for syscfg based sensors.
+ * Author: Ajit Pal Singh <ajitpal.singh@st.com>
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+
+#include "st_thermal.h"
+
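+/*
+ * The SYSCFG macros below turn datasheet register numbers into byte
+ * offsets within the relevant syscon register bank.
+ */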
+/* STiH415 */
+#define STIH415_SYSCFG_FRONT(num) ((num - 100) * 4)
+#define STIH415_SAS_THSENS_CONF STIH415_SYSCFG_FRONT(178)
+#define STIH415_SAS_THSENS_STATUS STIH415_SYSCFG_FRONT(198)
+#define STIH415_SYSCFG_MPE(num) ((num - 600) * 4)
+#define STIH415_MPE_THSENS_CONF STIH415_SYSCFG_MPE(607)
+#define STIH415_MPE_THSENS_STATUS STIH415_SYSCFG_MPE(667)
+
+/* STiH416 */
+#define STIH416_SYSCFG_FRONT(num) ((num - 1000) * 4)
+#define STIH416_SAS_THSENS_CONF STIH416_SYSCFG_FRONT(1552)
+#define STIH416_SAS_THSENS_STATUS1 STIH416_SYSCFG_FRONT(1554)
+#define STIH416_SAS_THSENS_STATUS2 STIH416_SYSCFG_FRONT(1594)
+
+/* STiD127 */
+#define STID127_SYSCFG_CPU(num) ((num - 700) * 4)
+#define STID127_THSENS_CONF STID127_SYSCFG_CPU(743)
+#define STID127_THSENS_STATUS STID127_SYSCFG_CPU(767)
+
+static const struct reg_field st_415sas_regfields[MAX_REGFIELDS] = {
+ [TEMP_PWR] = REG_FIELD(STIH415_SAS_THSENS_CONF, 9, 9),
+ [DCORRECT] = REG_FIELD(STIH415_SAS_THSENS_CONF, 4, 8),
+ [OVERFLOW] = REG_FIELD(STIH415_SAS_THSENS_STATUS, 8, 8),
+ [DATA] = REG_FIELD(STIH415_SAS_THSENS_STATUS, 10, 16),
+};
+
+static const struct reg_field st_415mpe_regfields[MAX_REGFIELDS] = {
+ [TEMP_PWR] = REG_FIELD(STIH415_MPE_THSENS_CONF, 8, 8),
+ [DCORRECT] = REG_FIELD(STIH415_MPE_THSENS_CONF, 3, 7),
+ [OVERFLOW] = REG_FIELD(STIH415_MPE_THSENS_STATUS, 9, 9),
+ [DATA] = REG_FIELD(STIH415_MPE_THSENS_STATUS, 11, 18),
+};
+
+static const struct reg_field st_416sas_regfields[MAX_REGFIELDS] = {
+ [TEMP_PWR] = REG_FIELD(STIH416_SAS_THSENS_CONF, 9, 9),
+ [DCORRECT] = REG_FIELD(STIH416_SAS_THSENS_CONF, 4, 8),
+ [OVERFLOW] = REG_FIELD(STIH416_SAS_THSENS_STATUS1, 8, 8),
+ [DATA] = REG_FIELD(STIH416_SAS_THSENS_STATUS2, 10, 16),
+};
+
+static const struct reg_field st_127_regfields[MAX_REGFIELDS] = {
+ [TEMP_PWR] = REG_FIELD(STID127_THSENS_CONF, 7, 7),
+ [DCORRECT] = REG_FIELD(STID127_THSENS_CONF, 2, 6),
+ [OVERFLOW] = REG_FIELD(STID127_THSENS_STATUS, 9, 9),
+ [DATA] = REG_FIELD(STID127_THSENS_STATUS, 11, 18),
+};
+
+/* Private ops for system configuration register based thermal sensors */
+static int st_syscfg_power_ctrl(struct st_thermal_sensor *sensor,
+ enum st_thermal_power_state power_state)
+{
+ return regmap_field_write(sensor->pwr, power_state);
+}
+
+static int st_syscfg_alloc_regfields(struct st_thermal_sensor *sensor)
+{
+ struct device *dev = sensor->dev;
+
+ sensor->pwr = devm_regmap_field_alloc(dev, sensor->regmap,
+ sensor->cdata->reg_fields[TEMP_PWR]);
+
+ if (IS_ERR(sensor->pwr)) {
+ dev_err(dev, "failed to alloc syscfg regfields\n");
+ return PTR_ERR(sensor->pwr);
+ }
+
+ return 0;
+}
+
+static int st_syscfg_regmap_init(struct st_thermal_sensor *sensor)
+{
+ sensor->regmap =
+ syscon_regmap_lookup_by_compatible(sensor->cdata->sys_compat);
+ if (IS_ERR(sensor->regmap)) {
+ dev_err(sensor->dev, "failed to find syscfg regmap\n");
+ return PTR_ERR(sensor->regmap);
+ }
+
+ return 0;
+}
+
+static const struct st_thermal_sensor_ops st_syscfg_sensor_ops = {
+ .power_ctrl = st_syscfg_power_ctrl,
+ .alloc_regfields = st_syscfg_alloc_regfields,
+ .regmap_init = st_syscfg_regmap_init,
+};
+
+/* Compatible device data for stih415 sas thermal sensor */
+const struct st_thermal_compat_data st_415sas_cdata = {
+ .sys_compat = "st,stih415-front-syscfg",
+ .reg_fields = st_415sas_regfields,
+ .ops = &st_syscfg_sensor_ops,
+ .calibration_val = 16,
+ .temp_adjust_val = 20,
+ .crit_temp = 120,
+};
+
+/* Compatible device data for stih415 mpe thermal sensor */
+const struct st_thermal_compat_data st_415mpe_cdata = {
+ .sys_compat = "st,stih415-system-syscfg",
+ .reg_fields = st_415mpe_regfields,
+ .ops = &st_syscfg_sensor_ops,
+ .calibration_val = 16,
+ .temp_adjust_val = -103,
+ .crit_temp = 120,
+};
+
+/* Compatible device data for stih416 sas thermal sensor */
+const struct st_thermal_compat_data st_416sas_cdata = {
+ .sys_compat = "st,stih416-front-syscfg",
+ .reg_fields = st_416sas_regfields,
+ .ops = &st_syscfg_sensor_ops,
+ .calibration_val = 16,
+ .temp_adjust_val = 20,
+ .crit_temp = 120,
+};
+
+/* Compatible device data for stid127 thermal sensor */
+const struct st_thermal_compat_data st_127_cdata = {
+ .sys_compat = "st,stid127-cpu-syscfg",
+ .reg_fields = st_127_regfields,
+ .ops = &st_syscfg_sensor_ops,
+ .calibration_val = 8,
+ .temp_adjust_val = -103,
+ .crit_temp = 120,
+};
+
+static struct of_device_id st_syscfg_thermal_of_match[] = {
+ { .compatible = "st,stih415-sas-thermal", .data = &st_415sas_cdata },
+ { .compatible = "st,stih415-mpe-thermal", .data = &st_415mpe_cdata },
+ { .compatible = "st,stih416-sas-thermal", .data = &st_416sas_cdata },
+ { .compatible = "st,stid127-thermal", .data = &st_127_cdata },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, st_syscfg_thermal_of_match);
+
+int st_syscfg_probe(struct platform_device *pdev)
+{
+ return st_thermal_register(pdev, st_syscfg_thermal_of_match);
+}
+
+int st_syscfg_remove(struct platform_device *pdev)
+{
+ return st_thermal_unregister(pdev);
+}
+
+static struct platform_driver st_syscfg_thermal_driver = {
+ .driver = {
+ .name = "st_syscfg_thermal",
+ .owner = THIS_MODULE,
+ .pm = &st_thermal_pm_ops,
+ .of_match_table = st_syscfg_thermal_of_match,
+ },
+ .probe = st_syscfg_probe,
+ .remove = st_syscfg_remove,
+};
+module_platform_driver(st_syscfg_thermal_driver);
+
+MODULE_AUTHOR("STMicroelectronics (R&D) Limited <ajitpal.singh@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STi SoC Thermal Sensor Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 4aff02d6712e..c78f43a481ce 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -47,10 +47,6 @@
#include <asm/irq.h>
-#ifdef CONFIG_SAMSUNG_CLOCK
-#include <plat/clock.h>
-#endif
-
#include "samsung.h"
#if defined(CONFIG_SERIAL_SAMSUNG_DEBUG) && \
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index ba1dbcdf4609..0e8c39b6ccd4 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -3383,12 +3383,11 @@ static int alloc_desc(struct slgt_info *info)
unsigned int pbufs;
/* allocate memory to hold descriptor lists */
- info->bufs = pci_alloc_consistent(info->pdev, DESC_LIST_SIZE, &info->bufs_dma_addr);
+ info->bufs = pci_zalloc_consistent(info->pdev, DESC_LIST_SIZE,
+ &info->bufs_dma_addr);
if (info->bufs == NULL)
return -ENOMEM;
- memset(info->bufs, 0, DESC_LIST_SIZE);
-
info->rbufs = (struct slgt_desc*)info->bufs;
info->tbufs = ((struct slgt_desc*)info->bufs) + info->rbuf_count;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 454b65898e2c..42bad18c66c9 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -355,7 +355,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(struct work_struct *ignored)
{
- out_of_memory(node_zonelist(first_online_node, GFP_KERNEL), GFP_KERNEL,
+ out_of_memory(node_zonelist(first_memory_node, GFP_KERNEL), GFP_KERNEL,
0, NULL, true);
}
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 36b6bce33b20..6d0f6080eceb 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -147,7 +147,7 @@ err0:
}
EXPORT_SYMBOL_GPL(usb_get_phy);
- /**
+/**
* devm_usb_get_phy_by_phandle - find the USB PHY by phandle
* @dev - device that requests this phy
* @phandle - name of the property holding the phy phandle value
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 9a26242baefa..715f299af6ea 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -18,9 +18,7 @@ config USB_STORAGE
This option depends on 'SCSI' support being enabled, but you
probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices. Some devices also
- will require 'Probe all LUNs on each SCSI device'
- (SCSI_MULTI_LUN).
+ (BLK_DEV_SD) for most USB storage devices.
To compile this driver as a module, choose M here: the
module will be called usb-storage.
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index 073a2c32ccc4..38a4504ce450 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -1498,7 +1498,7 @@ static int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int ret;
- usb_stor_dbg(us, "LUN=%d\n", srb->device->lun);
+ usb_stor_dbg(us, "LUN=%d\n", (u8)srb->device->lun);
switch (srb->device->lun) {
case 0:
@@ -1524,7 +1524,7 @@ static int dpcm_transport(struct scsi_cmnd *srb, struct us_data *us)
break;
default:
- usb_stor_dbg(us, "Invalid LUN %d\n", srb->device->lun);
+ usb_stor_dbg(us, "Invalid LUN %d\n", (u8)srb->device->lun);
ret = USB_STOR_TRANSPORT_ERROR;
break;
}
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index f1c96261a501..cedb29252a92 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -347,14 +347,16 @@ static int usb_stor_control_thread(void * __us)
*/
else if (us->srb->device->id &&
!(us->fflags & US_FL_SCM_MULT_TARG)) {
- usb_stor_dbg(us, "Bad target number (%d:%d)\n",
- us->srb->device->id, us->srb->device->lun);
+ usb_stor_dbg(us, "Bad target number (%d:%llu)\n",
+ us->srb->device->id,
+ us->srb->device->lun);
us->srb->result = DID_BAD_TARGET << 16;
}
else if (us->srb->device->lun > us->max_lun) {
- usb_stor_dbg(us, "Bad LUN (%d:%d)\n",
- us->srb->device->id, us->srb->device->lun);
+ usb_stor_dbg(us, "Bad LUN (%d:%llu)\n",
+ us->srb->device->id,
+ us->srb->device->lun);
us->srb->result = DID_BAD_TARGET << 16;
}
diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile
index 72bfabc8629e..50e30bc75e85 100644
--- a/drivers/vfio/Makefile
+++ b/drivers/vfio/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_VFIO) += vfio.o
obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o
obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o
+obj-$(CONFIG_EEH) += vfio_spapr_eeh.o
obj-$(CONFIG_VFIO_PCI) += pci/
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 010e0f8b8e4f..e2ee80f36e3e 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -157,8 +157,10 @@ static void vfio_pci_release(void *device_data)
{
struct vfio_pci_device *vdev = device_data;
- if (atomic_dec_and_test(&vdev->refcnt))
+ if (atomic_dec_and_test(&vdev->refcnt)) {
+ vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
+ }
module_put(THIS_MODULE);
}
@@ -166,19 +168,27 @@ static void vfio_pci_release(void *device_data)
static int vfio_pci_open(void *device_data)
{
struct vfio_pci_device *vdev = device_data;
+ int ret;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
if (atomic_inc_return(&vdev->refcnt) == 1) {
- int ret = vfio_pci_enable(vdev);
+ ret = vfio_pci_enable(vdev);
+ if (ret)
+ goto error;
+
+ ret = vfio_spapr_pci_eeh_open(vdev->pdev);
if (ret) {
- module_put(THIS_MODULE);
- return ret;
+ vfio_pci_disable(vdev);
+ goto error;
}
}
return 0;
+error:
+ module_put(THIS_MODULE);
+ return ret;
}
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index a84788ba662c..730b4ef3e0cc 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -156,7 +156,16 @@ static long tce_iommu_ioctl(void *iommu_data,
switch (cmd) {
case VFIO_CHECK_EXTENSION:
- return (arg == VFIO_SPAPR_TCE_IOMMU) ? 1 : 0;
+ switch (arg) {
+ case VFIO_SPAPR_TCE_IOMMU:
+ ret = 1;
+ break;
+ default:
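+ /* Defer other extensions to the EEH helper, which reports VFIO_EEH support */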
+ ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
+ break;
+ }
+
+ return (ret < 0) ? 0 : ret;
case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
struct vfio_iommu_spapr_tce_info info;
@@ -283,6 +292,12 @@ static long tce_iommu_ioctl(void *iommu_data,
tce_iommu_disable(container);
mutex_unlock(&container->lock);
return 0;
+ case VFIO_EEH_PE_OP:
+ if (!container->tbl || !container->tbl->it_group)
+ return -ENODEV;
+
+ return vfio_spapr_iommu_eeh_ioctl(container->tbl->it_group,
+ cmd, arg);
}
return -ENOTTY;
diff --git a/drivers/vfio/vfio_spapr_eeh.c b/drivers/vfio/vfio_spapr_eeh.c
new file mode 100644
index 000000000000..f834b4ce1431
--- /dev/null
+++ b/drivers/vfio/vfio_spapr_eeh.c
@@ -0,0 +1,87 @@
+/*
+ * EEH functionality support for VFIO devices. The feature is only
+ * available on sPAPR compatible platforms.
+ *
+ * Copyright Gavin Shan, IBM Corporation 2014.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <asm/eeh.h>
+
+/* We might build address mapping here for "fast" path later */
+int vfio_spapr_pci_eeh_open(struct pci_dev *pdev)
+{
+ return eeh_dev_open(pdev);
+}
+
+void vfio_spapr_pci_eeh_release(struct pci_dev *pdev)
+{
+ eeh_dev_release(pdev);
+}
+
+long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
+ unsigned int cmd, unsigned long arg)
+{
+ struct eeh_pe *pe;
+ struct vfio_eeh_pe_op op;
+ unsigned long minsz;
+ long ret = -EINVAL;
+
+ switch (cmd) {
+ case VFIO_CHECK_EXTENSION:
+ if (arg == VFIO_EEH)
+ ret = eeh_enabled() ? 1 : 0;
+ else
+ ret = 0;
+ break;
+ case VFIO_EEH_PE_OP:
+ pe = eeh_iommu_group_to_pe(group);
+ if (!pe)
+ return -ENODEV;
+
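+ /* Copy just the fixed header of the user argument; no flags are defined yet */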
+ minsz = offsetofend(struct vfio_eeh_pe_op, op);
+ if (copy_from_user(&op, (void __user *)arg, minsz))
+ return -EFAULT;
+ if (op.argsz < minsz || op.flags)
+ return -EINVAL;
+
+ switch (op.op) {
+ case VFIO_EEH_PE_DISABLE:
+ ret = eeh_pe_set_option(pe, EEH_OPT_DISABLE);
+ break;
+ case VFIO_EEH_PE_ENABLE:
+ ret = eeh_pe_set_option(pe, EEH_OPT_ENABLE);
+ break;
+ case VFIO_EEH_PE_UNFREEZE_IO:
+ ret = eeh_pe_set_option(pe, EEH_OPT_THAW_MMIO);
+ break;
+ case VFIO_EEH_PE_UNFREEZE_DMA:
+ ret = eeh_pe_set_option(pe, EEH_OPT_THAW_DMA);
+ break;
+ case VFIO_EEH_PE_GET_STATE:
+ ret = eeh_pe_get_state(pe);
+ break;
+ case VFIO_EEH_PE_RESET_DEACTIVATE:
+ ret = eeh_pe_reset(pe, EEH_RESET_DEACTIVATE);
+ break;
+ case VFIO_EEH_PE_RESET_HOT:
+ ret = eeh_pe_reset(pe, EEH_RESET_HOT);
+ break;
+ case VFIO_EEH_PE_RESET_FUNDAMENTAL:
+ ret = eeh_pe_reset(pe, EEH_RESET_FUNDAMENTAL);
+ break;
+ case VFIO_EEH_PE_CONFIGURE:
+ ret = eeh_pe_configure(pe);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 5d449059a556..8d03924749b8 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -178,17 +178,6 @@ config BACKLIGHT_ATMEL_LCDC
If in doubt, it's safe to enable this option; it doesn't kick
in unless the board's description says it's wired that way.
-config BACKLIGHT_ATMEL_PWM
- tristate "Atmel PWM backlight control"
- depends on ATMEL_PWM
- help
- Say Y here if you want to use the PWM peripheral in Atmel AT91 and
- AVR32 devices. This driver will need additional platform data to know
- which PWM instance to use and how to configure it.
-
- To compile this driver as a module, choose M here: the module will be
- called atmel-pwm-bl.
-
config BACKLIGHT_EP93XX
tristate "Cirrus EP93xx Backlight Driver"
depends on FB_EP93XX
@@ -207,6 +196,15 @@ config BACKLIGHT_GENERIC
known as the Corgi backlight driver. If you have a Sharp Zaurus
SL-C7xx, SL-Cxx00 or SL-6000x say y.
+config BACKLIGHT_IPAQ_MICRO
+ tristate "iPAQ microcontroller backlight driver"
+ depends on MFD_IPAQ_MICRO
+ default y
+ help
+ Say Y to enable the backlight driver for Compaq iPAQ handheld
+ computers such as the h3100/h3600/h3700 machines.
+
config BACKLIGHT_LM3533
tristate "Backlight Driver for LM3533"
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index bb820024f346..fcd50b732165 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
obj-$(CONFIG_BACKLIGHT_AS3711) += as3711_bl.o
-obj-$(CONFIG_BACKLIGHT_ATMEL_PWM) += atmel-pwm-bl.o
obj-$(CONFIG_BACKLIGHT_BD6107) += bd6107.o
obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o
@@ -36,6 +35,7 @@ obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
obj-$(CONFIG_BACKLIGHT_GPIO) += gpio_backlight.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
+obj-$(CONFIG_BACKLIGHT_IPAQ_MICRO) += ipaq_micro_bl.o
obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LM3630A) += lm3630a_bl.o
obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index ec5350f2c28a..86234c31d79c 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -67,11 +67,6 @@ static inline int aat2870_bl_disable(struct aat2870_bl_driver_data *aat2870_bl)
return aat2870->write(aat2870, AAT2870_BL_CH_EN, 0x0);
}
-static int aat2870_bl_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static int aat2870_bl_update_status(struct backlight_device *bd)
{
struct aat2870_bl_driver_data *aat2870_bl = bl_get_data(bd);
@@ -120,7 +115,6 @@ static int aat2870_bl_check_fb(struct backlight_device *bd, struct fb_info *fi)
static const struct backlight_ops aat2870_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
- .get_brightness = aat2870_bl_get_brightness,
.update_status = aat2870_bl_update_status,
.check_fb = aat2870_bl_check_fb,
};
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index d8952c4aa689..4726c8be626f 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -410,11 +410,6 @@ static int ams369fg06_set_power(struct lcd_device *ld, int power)
return ams369fg06_power(lcd, power);
}
-static int ams369fg06_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static int ams369fg06_set_brightness(struct backlight_device *bd)
{
int ret = 0;
@@ -443,7 +438,6 @@ static struct lcd_ops ams369fg06_lcd_ops = {
};
static const struct backlight_ops ams369fg06_backlight_ops = {
- .get_brightness = ams369fg06_get_brightness,
.update_status = ams369fg06_set_brightness,
};
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
deleted file mode 100644
index 261b1a4ec3d8..000000000000
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright (C) 2008 Atmel Corporation
- *
- * Backlight driver using Atmel PWM peripheral.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/backlight.h>
-#include <linux/atmel_pwm.h>
-#include <linux/atmel-pwm-bl.h>
-#include <linux/slab.h>
-
-struct atmel_pwm_bl {
- const struct atmel_pwm_bl_platform_data *pdata;
- struct backlight_device *bldev;
- struct platform_device *pdev;
- struct pwm_channel pwmc;
- int gpio_on;
-};
-
-static void atmel_pwm_bl_set_gpio_on(struct atmel_pwm_bl *pwmbl, int on)
-{
- if (!gpio_is_valid(pwmbl->gpio_on))
- return;
-
- gpio_set_value(pwmbl->gpio_on, on ^ pwmbl->pdata->on_active_low);
-}
-
-static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
-{
- struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
- int intensity = bd->props.brightness;
- int pwm_duty;
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- intensity = 0;
-
- if (pwmbl->pdata->pwm_active_low)
- pwm_duty = pwmbl->pdata->pwm_duty_min + intensity;
- else
- pwm_duty = pwmbl->pdata->pwm_duty_max - intensity;
-
- if (pwm_duty > pwmbl->pdata->pwm_duty_max)
- pwm_duty = pwmbl->pdata->pwm_duty_max;
- if (pwm_duty < pwmbl->pdata->pwm_duty_min)
- pwm_duty = pwmbl->pdata->pwm_duty_min;
-
- if (!intensity) {
- atmel_pwm_bl_set_gpio_on(pwmbl, 0);
- pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
- pwm_channel_disable(&pwmbl->pwmc);
- } else {
- pwm_channel_enable(&pwmbl->pwmc);
- pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
- atmel_pwm_bl_set_gpio_on(pwmbl, 1);
- }
-
- return 0;
-}
-
-static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
-{
- struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
- u32 cdty;
- u32 intensity;
-
- cdty = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
- if (pwmbl->pdata->pwm_active_low)
- intensity = cdty - pwmbl->pdata->pwm_duty_min;
- else
- intensity = pwmbl->pdata->pwm_duty_max - cdty;
-
- return intensity & 0xffff;
-}
-
-static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
-{
- unsigned long pwm_rate = pwmbl->pwmc.mck;
- unsigned long prescale = DIV_ROUND_UP(pwm_rate,
- (pwmbl->pdata->pwm_frequency *
- pwmbl->pdata->pwm_compare_max)) - 1;
-
- /*
- * Prescale must be power of two and maximum 0xf in size because of
- * hardware limit. PWM speed will be:
- * PWM module clock speed / (2 ^ prescale).
- */
- prescale = fls(prescale);
- if (prescale > 0xf)
- prescale = 0xf;
-
- pwm_channel_writel(&pwmbl->pwmc, PWM_CMR, prescale);
- pwm_channel_writel(&pwmbl->pwmc, PWM_CDTY,
- pwmbl->pdata->pwm_duty_min +
- pwmbl->bldev->props.brightness);
- pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD,
- pwmbl->pdata->pwm_compare_max);
-
- dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver (%lu Hz)\n",
- pwmbl->pwmc.mck / pwmbl->pdata->pwm_compare_max /
- (1 << prescale));
-
- return pwm_channel_enable(&pwmbl->pwmc);
-}
-
-static const struct backlight_ops atmel_pwm_bl_ops = {
- .get_brightness = atmel_pwm_bl_get_intensity,
- .update_status = atmel_pwm_bl_set_intensity,
-};
-
-static int atmel_pwm_bl_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- const struct atmel_pwm_bl_platform_data *pdata;
- struct backlight_device *bldev;
- struct atmel_pwm_bl *pwmbl;
- unsigned long flags;
- int retval;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata)
- return -ENODEV;
-
- if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
- pdata->pwm_duty_min > pdata->pwm_duty_max ||
- pdata->pwm_frequency == 0)
- return -EINVAL;
-
- pwmbl = devm_kzalloc(&pdev->dev, sizeof(struct atmel_pwm_bl),
- GFP_KERNEL);
- if (!pwmbl)
- return -ENOMEM;
-
- pwmbl->pdev = pdev;
- pwmbl->pdata = pdata;
- pwmbl->gpio_on = pdata->gpio_on;
-
- retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
- if (retval)
- return retval;
-
- if (gpio_is_valid(pwmbl->gpio_on)) {
- /* Turn display off by default. */
- if (pdata->on_active_low)
- flags = GPIOF_OUT_INIT_HIGH;
- else
- flags = GPIOF_OUT_INIT_LOW;
-
- retval = devm_gpio_request_one(&pdev->dev, pwmbl->gpio_on,
- flags, "gpio_atmel_pwm_bl");
- if (retval)
- goto err_free_pwm;
- }
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
- bldev = devm_backlight_device_register(&pdev->dev, "atmel-pwm-bl",
- &pdev->dev, pwmbl, &atmel_pwm_bl_ops,
- &props);
- if (IS_ERR(bldev)) {
- retval = PTR_ERR(bldev);
- goto err_free_pwm;
- }
-
- pwmbl->bldev = bldev;
-
- platform_set_drvdata(pdev, pwmbl);
-
- /* Power up the backlight by default at middle intesity. */
- bldev->props.power = FB_BLANK_UNBLANK;
- bldev->props.brightness = bldev->props.max_brightness / 2;
-
- retval = atmel_pwm_bl_init_pwm(pwmbl);
- if (retval)
- goto err_free_pwm;
-
- atmel_pwm_bl_set_intensity(bldev);
-
- return 0;
-
-err_free_pwm:
- pwm_channel_free(&pwmbl->pwmc);
-
- return retval;
-}
-
-static int atmel_pwm_bl_remove(struct platform_device *pdev)
-{
- struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);
-
- atmel_pwm_bl_set_gpio_on(pwmbl, 0);
- pwm_channel_disable(&pwmbl->pwmc);
- pwm_channel_free(&pwmbl->pwmc);
-
- return 0;
-}
-
-static struct platform_driver atmel_pwm_bl_driver = {
- .driver = {
- .name = "atmel-pwm-bl",
- },
- /* REVISIT add suspend() and resume() */
- .probe = atmel_pwm_bl_probe,
- .remove = atmel_pwm_bl_remove,
-};
-
-module_platform_driver(atmel_pwm_bl_driver);
-
-MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
-MODULE_DESCRIPTION("Atmel PWM backlight driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:atmel-pwm-bl");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 428089009cd5..bddc8b17a4d8 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -223,6 +223,8 @@ static ssize_t actual_brightness_show(struct device *dev,
mutex_lock(&bd->ops_lock);
if (bd->ops && bd->ops->get_brightness)
rc = sprintf(buf, "%d\n", bd->ops->get_brightness(bd));
+ else
+ rc = sprintf(buf, "%d\n", bd->props.brightness);
mutex_unlock(&bd->ops_lock);
return rc;
diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c
index 16dd9bc625bd..fdb2f7e2c6b5 100644
--- a/drivers/video/backlight/bd6107.c
+++ b/drivers/video/backlight/bd6107.c
@@ -105,11 +105,6 @@ static int bd6107_backlight_update_status(struct backlight_device *backlight)
return 0;
}
-static int bd6107_backlight_get_brightness(struct backlight_device *backlight)
-{
- return backlight->props.brightness;
-}
-
static int bd6107_backlight_check_fb(struct backlight_device *backlight,
struct fb_info *info)
{
@@ -121,7 +116,6 @@ static int bd6107_backlight_check_fb(struct backlight_device *backlight,
static const struct backlight_ops bd6107_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = bd6107_backlight_update_status,
- .get_brightness = bd6107_backlight_get_brightness,
.check_fb = bd6107_backlight_check_fb,
};
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index 1cea68848f1a..aaead04a2d54 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -44,11 +44,6 @@ static int gpio_backlight_update_status(struct backlight_device *bl)
return 0;
}
-static int gpio_backlight_get_brightness(struct backlight_device *bl)
-{
- return bl->props.brightness;
-}
-
static int gpio_backlight_check_fb(struct backlight_device *bl,
struct fb_info *info)
{
@@ -60,7 +55,6 @@ static int gpio_backlight_check_fb(struct backlight_device *bl,
static const struct backlight_ops gpio_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = gpio_backlight_update_status,
- .get_brightness = gpio_backlight_get_brightness,
.check_fb = gpio_backlight_check_fb,
};
diff --git a/drivers/video/backlight/ipaq_micro_bl.c b/drivers/video/backlight/ipaq_micro_bl.c
new file mode 100644
index 000000000000..347dc11d4ceb
--- /dev/null
+++ b/drivers/video/backlight/ipaq_micro_bl.c
@@ -0,0 +1,83 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * iPAQ microcontroller backlight support
+ * Author : Linus Walleij <linus.walleij@linaro.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/mfd/ipaq-micro.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static int micro_bl_update_status(struct backlight_device *bd)
+{
+ struct ipaq_micro *micro = dev_get_drvdata(&bd->dev);
+ int intensity = bd->props.brightness;
+ struct ipaq_micro_msg msg = {
+ .id = MSG_BACKLIGHT,
+ .tx_len = 3,
+ };
+
+ if (bd->props.power != FB_BLANK_UNBLANK)
+ intensity = 0;
+ if (bd->props.state & (BL_CORE_FBBLANK | BL_CORE_SUSPENDED))
+ intensity = 0;
+
+ /*
+ * Message format:
+ * Byte 0: backlight instance (usually 1)
+ * Byte 1: on/off
+ * Byte 2: intensity, 0-255
+ */
+ msg.tx_data[0] = 0x01;
+ msg.tx_data[1] = intensity > 0 ? 1 : 0;
+ msg.tx_data[2] = intensity;
+ return ipaq_micro_tx_msg_sync(micro, &msg);
+}
+
+static const struct backlight_ops micro_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = micro_bl_update_status,
+};
+
+static struct backlight_properties micro_bl_props = {
+ .type = BACKLIGHT_RAW,
+ .max_brightness = 255,
+ .power = FB_BLANK_UNBLANK,
+ .brightness = 64,
+};
+
+static int micro_backlight_probe(struct platform_device *pdev)
+{
+ struct backlight_device *bd;
+ struct ipaq_micro *micro = dev_get_drvdata(pdev->dev.parent);
+
+ bd = devm_backlight_device_register(&pdev->dev, "ipaq-micro-backlight",
+ &pdev->dev, micro, &micro_bl_ops,
+ &micro_bl_props);
+ if (IS_ERR(bd))
+ return PTR_ERR(bd);
+
+ platform_set_drvdata(pdev, bd);
+ backlight_update_status(bd);
+
+ return 0;
+}
+
+static struct platform_driver micro_backlight_device_driver = {
+ .driver = {
+ .name = "ipaq-micro-backlight",
+ },
+ .probe = micro_backlight_probe,
+};
+module_platform_driver(micro_backlight_device_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("driver for iPAQ Atmel micro backlight");
+MODULE_ALIAS("platform:ipaq-micro-backlight");
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index da3876c9b3ae..228bc319de19 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -43,37 +43,38 @@ static int jornada_lcd_get_contrast(struct lcd_device *ld)
jornada_ssp_start();
- if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) {
- dev_err(&ld->dev, "get contrast failed\n");
- jornada_ssp_end();
- return -ETIMEDOUT;
- } else {
+ if (jornada_ssp_byte(GETCONTRAST) == TXDUMMY) {
ret = jornada_ssp_byte(TXDUMMY);
- jornada_ssp_end();
- return ret;
+ goto success;
}
+
+ dev_err(&ld->dev, "failed to set contrast\n");
+ ret = -ETIMEDOUT;
+
+success:
+ jornada_ssp_end();
+ return ret;
}
static int jornada_lcd_set_contrast(struct lcd_device *ld, int value)
{
- int ret;
+ int ret = 0;
jornada_ssp_start();
/* start by sending our set contrast cmd to mcu */
- ret = jornada_ssp_byte(SETCONTRAST);
-
- /* push the new value */
- if (jornada_ssp_byte(value) != TXDUMMY) {
- dev_err(&ld->dev, "set contrast failed\n");
- jornada_ssp_end();
- return -ETIMEDOUT;
+ if (jornada_ssp_byte(SETCONTRAST) == TXDUMMY) {
+ /* if successful push the new value */
+ if (jornada_ssp_byte(value) == TXDUMMY)
+ goto success;
}
- /* if we get here we can assume everything went well */
- jornada_ssp_end();
+ dev_err(&ld->dev, "failed to set contrast\n");
+ ret = -ETIMEDOUT;
- return 0;
+success:
+ jornada_ssp_end();
+ return ret;
}
static int jornada_lcd_set_power(struct lcd_device *ld, int power)
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index 506a6c236039..ccb44e8e4927 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -642,11 +642,6 @@ static int ld9040_get_power(struct lcd_device *ld)
return lcd->power;
}
-static int ld9040_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static int ld9040_set_brightness(struct backlight_device *bd)
{
int ret = 0, brightness = bd->props.brightness;
@@ -674,7 +669,6 @@ static struct lcd_ops ld9040_lcd_ops = {
};
static const struct backlight_ops ld9040_backlight_ops = {
- .get_brightness = ld9040_get_brightness,
.update_status = ld9040_set_brightness,
};
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 2ca3a040007b..dcdd5443efcf 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -274,15 +274,9 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
return 0;
}
-static int lp855x_bl_get_brightness(struct backlight_device *bl)
-{
- return bl->props.brightness;
-}
-
static const struct backlight_ops lp855x_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = lp855x_bl_update_status,
- .get_brightness = lp855x_bl_get_brightness,
};
static int lp855x_backlight_register(struct lp855x *lp)
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index daba34dc46d4..d6c4f6a2d43e 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -176,15 +176,9 @@ static int lp8788_bl_update_status(struct backlight_device *bl_dev)
return 0;
}
-static int lp8788_bl_get_brightness(struct backlight_device *bl_dev)
-{
- return bl_dev->props.brightness;
-}
-
static const struct backlight_ops lp8788_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = lp8788_bl_update_status,
- .get_brightness = lp8788_bl_get_brightness,
};
static int lp8788_backlight_register(struct lp8788_bl *bl)
diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c
index 1802b2d1357d..8ab7297b118a 100644
--- a/drivers/video/backlight/lv5207lp.c
+++ b/drivers/video/backlight/lv5207lp.c
@@ -70,11 +70,6 @@ static int lv5207lp_backlight_update_status(struct backlight_device *backlight)
return 0;
}
-static int lv5207lp_backlight_get_brightness(struct backlight_device *backlight)
-{
- return backlight->props.brightness;
-}
-
static int lv5207lp_backlight_check_fb(struct backlight_device *backlight,
struct fb_info *info)
{
@@ -86,7 +81,6 @@ static int lv5207lp_backlight_check_fb(struct backlight_device *backlight,
static const struct backlight_ops lv5207lp_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = lv5207lp_backlight_update_status,
- .get_brightness = lv5207lp_backlight_get_brightness,
.check_fb = lv5207lp_backlight_check_fb,
};
diff --git a/drivers/video/backlight/pandora_bl.c b/drivers/video/backlight/pandora_bl.c
index 2098c5d6efb9..2e3f82063c03 100644
--- a/drivers/video/backlight/pandora_bl.c
+++ b/drivers/video/backlight/pandora_bl.c
@@ -100,15 +100,9 @@ done:
return 0;
}
-static int pandora_backlight_get_brightness(struct backlight_device *bl)
-{
- return bl->props.brightness;
-}
-
static const struct backlight_ops pandora_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = pandora_backlight_update_status,
- .get_brightness = pandora_backlight_get_brightness,
};
static int pandora_backlight_probe(struct platform_device *pdev)
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 38ca88bc5c3e..d7a3d13e72ec 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -115,11 +115,6 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
return 0;
}
-static int pwm_backlight_get_brightness(struct backlight_device *bl)
-{
- return bl->props.brightness;
-}
-
static int pwm_backlight_check_fb(struct backlight_device *bl,
struct fb_info *info)
{
@@ -130,7 +125,6 @@ static int pwm_backlight_check_fb(struct backlight_device *bl,
static const struct backlight_ops pwm_backlight_ops = {
.update_status = pwm_backlight_update_status,
- .get_brightness = pwm_backlight_get_brightness,
.check_fb = pwm_backlight_check_fb,
};
@@ -245,13 +239,10 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->dev = &pdev->dev;
pb->enabled = false;
- pb->enable_gpio = devm_gpiod_get(&pdev->dev, "enable");
+ pb->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable");
if (IS_ERR(pb->enable_gpio)) {
ret = PTR_ERR(pb->enable_gpio);
- if (ret == -ENOENT)
- pb->enable_gpio = NULL;
- else
- goto err_alloc;
+ goto err_alloc;
}
/*
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index 2d6d48196c6d..f3a65c8940ed 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -597,11 +597,6 @@ static int s6e63m0_get_power(struct lcd_device *ld)
return lcd->power;
}
-static int s6e63m0_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static int s6e63m0_set_brightness(struct backlight_device *bd)
{
int ret = 0, brightness = bd->props.brightness;
@@ -629,7 +624,6 @@ static struct lcd_ops s6e63m0_lcd_ops = {
};
static const struct backlight_ops s6e63m0_backlight_ops = {
- .get_brightness = s6e63m0_get_brightness,
.update_status = s6e63m0_set_brightness,
};
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index 595dcf561020..2e04d93aa0ef 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -109,15 +109,9 @@ static int tps65217_bl_update_status(struct backlight_device *bl)
return rc;
}
-static int tps65217_bl_get_brightness(struct backlight_device *bl)
-{
- return bl->props.brightness;
-}
-
static const struct backlight_ops tps65217_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
.update_status = tps65217_bl_update_status,
- .get_brightness = tps65217_bl_get_brightness
};
static int tps65217_bl_hw_init(struct tps65217_bl *tps65217_bl,
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index 552258c8f99d..17f21cedff9b 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -49,12 +49,6 @@
#error wrong architecture for the MC68x328 frame buffer device
#endif
-#if defined(CONFIG_FB_68328_INVERT)
-#define MC68X328FB_MONO_VISUAL FB_VISUAL_MONO01
-#else
-#define MC68X328FB_MONO_VISUAL FB_VISUAL_MONO10
-#endif
-
static u_long videomemory;
static u_long videomemorysize;
@@ -462,7 +456,7 @@ int __init mc68x328fb_init(void)
fb_info.fix.line_length =
get_line_length(mc68x328fb_default.xres_virtual, mc68x328fb_default.bits_per_pixel);
fb_info.fix.visual = (mc68x328fb_default.bits_per_pixel) == 1 ?
- MC68X328FB_MONO_VISUAL : FB_VISUAL_PSEUDOCOLOR;
+ FB_VISUAL_MONO10 : FB_VISUAL_PSEUDOCOLOR;
if (fb_info.var.bits_per_pixel == 1) {
fb_info.var.red.length = fb_info.var.green.length = fb_info.var.blue.length = 1;
fb_info.var.red.offset = fb_info.var.green.offset = fb_info.var.blue.offset = 0;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 59c98bfd5a8a..e911b9c96e19 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -280,6 +280,8 @@ config FB_ARMCLCD
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+ select FB_MODE_HELPERS if OF
+ select VIDEOMODE_HELPERS if OF
help
This framebuffer device driver is for the ARM PrimeCell PL110
Colour LCD controller. ARM PrimeCells provide the building
@@ -290,6 +292,12 @@ config FB_ARMCLCD
here and read <file:Documentation/kbuild/modules.txt>. The module
will be called amba-clcd.
+# Helper logic selected only by the ARM Versatile platform family.
+config PLAT_VERSATILE_CLCD
+ def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
+ depends on ARM
+ depends on FB_ARMCLCD && FB=y
+
config FB_ACORN
bool "Acorn VIDC support"
depends on (FB = y) && ARM && ARCH_ACORN
@@ -301,15 +309,26 @@ config FB_ACORN
hardware found in Acorn RISC PCs and other ARM-based machines. If
unsure, say N.
-config FB_CLPS711X
- bool "CLPS711X LCD support"
- depends on (FB = y) && ARM && ARCH_CLPS711X
+config FB_CLPS711X_OLD
+ tristate
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
+
+config FB_CLPS711X
+ tristate "CLPS711X LCD support"
+ depends on FB && (ARCH_CLPS711X || COMPILE_TEST)
+ select FB_CLPS711X_OLD if ARCH_CLPS711X && !ARCH_MULTIPLATFORM
+ select BACKLIGHT_LCD_SUPPORT
+ select FB_MODE_HELPERS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select LCD_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
help
- Say Y to enable the Framebuffer driver for the CLPS7111 and
- EP7212 processors.
+ Say Y to enable the Framebuffer driver for the Cirrus Logic
+ CLPS711X CPUs.
config FB_SA1100
bool "SA-1100 LCD support"
@@ -2018,8 +2037,8 @@ config FB_TMIO_ACCELL
config FB_S3C
tristate "Samsung S3C framebuffer support"
- depends on FB && (CPU_S3C2416 || ARCH_S3C64XX || ARCH_S5P64X0 || \
- ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
+ depends on FB && (CPU_S3C2416 || ARCH_S3C64XX || \
+ ARCH_S5PV210 || ARCH_EXYNOS)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 0284f2a12538..1979afffccfe 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -14,7 +14,8 @@ obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o
# Hardware specific drivers go first
obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o
obj-$(CONFIG_FB_ARC) += arcfb.o
-obj-$(CONFIG_FB_CLPS711X) += clps711xfb.o
+obj-$(CONFIG_FB_CLPS711X) += clps711x-fb.o
+obj-$(CONFIG_FB_CLPS711X_OLD) += clps711xfb.o
obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o
obj-$(CONFIG_FB_GRVGA) += grvga.o
obj-$(CONFIG_FB_PM2) += pm2fb.o
@@ -78,6 +79,7 @@ obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
obj-$(CONFIG_FB_PVR2) += pvr2fb.o
obj-$(CONFIG_FB_VOODOO1) += sstfb.o
obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
+obj-$(CONFIG_PLAT_VERSATILE_CLCD) += amba-clcd-versatile.o
obj-$(CONFIG_FB_GOLDFISH) += goldfishfb.o
obj-$(CONFIG_FB_68328) += 68328fb.o
obj-$(CONFIG_FB_GBE) += gbefb.o
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c
new file mode 100644
index 000000000000..7a8afcd4573e
--- /dev/null
+++ b/drivers/video/fbdev/amba-clcd-versatile.c
@@ -0,0 +1,182 @@
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/clcd.h>
+#include <linux/platform_data/video-clcd-versatile.h>
+
+static struct clcd_panel vga = {
+ .mode = {
+ .name = "VGA",
+ .refresh = 60,
+ .xres = 640,
+ .yres = 480,
+ .pixclock = 39721,
+ .left_margin = 40,
+ .right_margin = 24,
+ .upper_margin = 32,
+ .lower_margin = 11,
+ .hsync_len = 96,
+ .vsync_len = 2,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888,
+ .bpp = 16,
+};
+
+static struct clcd_panel xvga = {
+ .mode = {
+ .name = "XVGA",
+ .refresh = 60,
+ .xres = 1024,
+ .yres = 768,
+ .pixclock = 15748,
+ .left_margin = 152,
+ .right_margin = 48,
+ .upper_margin = 23,
+ .lower_margin = 3,
+ .hsync_len = 104,
+ .vsync_len = 4,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551 | CLCD_CAP_565 | CLCD_CAP_888,
+ .bpp = 16,
+};
+
+/* Sanyo TM38QV67A02A - 3.8 inch QVGA (320x240) Color TFT */
+static struct clcd_panel sanyo_tm38qv67a02a = {
+ .mode = {
+ .name = "Sanyo TM38QV67A02A",
+ .refresh = 116,
+ .xres = 320,
+ .yres = 240,
+ .pixclock = 100000,
+ .left_margin = 6,
+ .right_margin = 6,
+ .upper_margin = 5,
+ .lower_margin = 5,
+ .hsync_len = 6,
+ .vsync_len = 6,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551,
+ .bpp = 16,
+};
+
+static struct clcd_panel sanyo_2_5_in = {
+ .mode = {
+ .name = "Sanyo QVGA Portrait",
+ .refresh = 116,
+ .xres = 240,
+ .yres = 320,
+ .pixclock = 100000,
+ .left_margin = 20,
+ .right_margin = 10,
+ .upper_margin = 2,
+ .lower_margin = 2,
+ .hsync_len = 10,
+ .vsync_len = 2,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_IVS | TIM2_IHS | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551,
+ .bpp = 16,
+};
+
+/* Epson L2F50113T00 - 2.2 inch 176x220 Color TFT */
+static struct clcd_panel epson_l2f50113t00 = {
+ .mode = {
+ .name = "Epson L2F50113T00",
+ .refresh = 390,
+ .xres = 176,
+ .yres = 220,
+ .pixclock = 62500,
+ .left_margin = 3,
+ .right_margin = 2,
+ .upper_margin = 1,
+ .lower_margin = 0,
+ .hsync_len = 3,
+ .vsync_len = 2,
+ .sync = 0,
+ .vmode = FB_VMODE_NONINTERLACED,
+ },
+ .width = -1,
+ .height = -1,
+ .tim2 = TIM2_BCD | TIM2_IPC,
+ .cntl = CNTL_LCDTFT | CNTL_BGR | CNTL_LCDVCOMP(1),
+ .caps = CLCD_CAP_5551,
+ .bpp = 16,
+};
+
+static struct clcd_panel *panels[] = {
+ &vga,
+ &xvga,
+ &sanyo_tm38qv67a02a,
+ &sanyo_2_5_in,
+ &epson_l2f50113t00,
+};
+
+struct clcd_panel *versatile_clcd_get_panel(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(panels); i++)
+ if (strcmp(panels[i]->mode.name, name) == 0)
+ break;
+
+ if (i < ARRAY_SIZE(panels))
+ return panels[i];
+
+ pr_err("CLCD: couldn't get parameters for panel %s\n", name);
+
+ return NULL;
+}
+
+int versatile_clcd_setup_dma(struct clcd_fb *fb, unsigned long framesize)
+{
+ dma_addr_t dma;
+
+ fb->fb.screen_base = dma_alloc_writecombine(&fb->dev->dev, framesize,
+ &dma, GFP_KERNEL);
+ if (!fb->fb.screen_base) {
+ pr_err("CLCD: unable to map framebuffer\n");
+ return -ENOMEM;
+ }
+
+ fb->fb.fix.smem_start = dma;
+ fb->fb.fix.smem_len = framesize;
+
+ return 0;
+}
+
+int versatile_clcd_mmap_dma(struct clcd_fb *fb, struct vm_area_struct *vma)
+{
+ return dma_mmap_writecombine(&fb->dev->dev, vma,
+ fb->fb.screen_base,
+ fb->fb.fix.smem_start,
+ fb->fb.fix.smem_len);
+}
+
+void versatile_clcd_remove_dma(struct clcd_fb *fb)
+{
+ dma_free_writecombine(&fb->dev->dev, fb->fb.fix.smem_len,
+ fb->fb.screen_base, fb->fb.fix.smem_start);
+}
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 14d6b3793e0a..beadd3edaa17 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -26,6 +26,13 @@
#include <linux/amba/clcd.h>
#include <linux/clk.h>
#include <linux/hardirq.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
#include <asm/sizes.h>
@@ -543,6 +550,259 @@ static int clcdfb_register(struct clcd_fb *fb)
return ret;
}
+#ifdef CONFIG_OF
+static int clcdfb_of_get_dpi_panel_mode(struct device_node *node,
+ struct fb_videomode *mode)
+{
+ int err;
+ struct display_timing timing;
+ struct videomode video;
+
+ err = of_get_display_timing(node, "panel-timing", &timing);
+ if (err)
+ return err;
+
+ videomode_from_timing(&timing, &video);
+
+ err = fb_videomode_from_videomode(&video, mode);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int clcdfb_snprintf_mode(char *buf, int size, struct fb_videomode *mode)
+{
+ return snprintf(buf, size, "%ux%u@%u", mode->xres, mode->yres,
+ mode->refresh);
+}
+
+static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint,
+ struct fb_videomode *mode)
+{
+ int err;
+ struct device_node *panel;
+ char *name;
+ int len;
+
+ panel = of_graph_get_remote_port_parent(endpoint);
+ if (!panel)
+ return -ENODEV;
+
+ /* Only directly connected DPI panels supported for now */
+ if (of_device_is_compatible(panel, "panel-dpi"))
+ err = clcdfb_of_get_dpi_panel_mode(panel, mode);
+ else
+ err = -ENOENT;
+ if (err)
+ return err;
+
+ len = clcdfb_snprintf_mode(NULL, 0, mode);
+ name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
+ clcdfb_snprintf_mode(name, len + 1, mode);
+ mode->name = name;
+
+ return 0;
+}
+
+static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0)
+{
+ static struct {
+ unsigned int part;
+ u32 r0, g0, b0;
+ u32 caps;
+ } panels[] = {
+ { 0x110, 1, 7, 13, CLCD_CAP_5551 },
+ { 0x110, 0, 8, 16, CLCD_CAP_888 },
+ { 0x111, 4, 14, 20, CLCD_CAP_444 },
+ { 0x111, 3, 11, 19, CLCD_CAP_444 | CLCD_CAP_5551 },
+ { 0x111, 3, 10, 19, CLCD_CAP_444 | CLCD_CAP_5551 |
+ CLCD_CAP_565 },
+ { 0x111, 0, 8, 16, CLCD_CAP_444 | CLCD_CAP_5551 |
+ CLCD_CAP_565 | CLCD_CAP_888 },
+ };
+ int i;
+
+ /* Bypass pixel clock divider, data output on the falling edge */
+ fb->panel->tim2 = TIM2_BCD | TIM2_IPC;
+
+ /* TFT display, vert. comp. interrupt at the start of the back porch */
+ fb->panel->cntl |= CNTL_LCDTFT | CNTL_LCDVCOMP(1);
+
+ fb->panel->caps = 0;
+
+ /* Match the setup with known variants */
+ for (i = 0; i < ARRAY_SIZE(panels) && !fb->panel->caps; i++) {
+ if (amba_part(fb->dev) != panels[i].part)
+ continue;
+ if (g0 != panels[i].g0)
+ continue;
+ if (r0 == panels[i].r0 && b0 == panels[i].b0)
+ fb->panel->caps = panels[i].caps & CLCD_CAP_RGB;
+ if (r0 == panels[i].b0 && b0 == panels[i].r0)
+ fb->panel->caps = panels[i].caps & CLCD_CAP_BGR;
+ }
+
+ return fb->panel->caps ? 0 : -EINVAL;
+}
+
+static int clcdfb_of_init_display(struct clcd_fb *fb)
+{
+ struct device_node *endpoint;
+ int err;
+ u32 max_bandwidth;
+ u32 tft_r0b0g0[3];
+
+ fb->panel = devm_kzalloc(&fb->dev->dev, sizeof(*fb->panel), GFP_KERNEL);
+ if (!fb->panel)
+ return -ENOMEM;
+
+ endpoint = of_graph_get_next_endpoint(fb->dev->dev.of_node, NULL);
+ if (!endpoint)
+ return -ENODEV;
+
+ err = clcdfb_of_get_mode(&fb->dev->dev, endpoint, &fb->panel->mode);
+ if (err)
+ return err;
+
+ err = of_property_read_u32(fb->dev->dev.of_node, "max-memory-bandwidth",
+ &max_bandwidth);
+ if (!err)
+ fb->panel->bpp = 8 * max_bandwidth / (fb->panel->mode.xres *
+ fb->panel->mode.yres * fb->panel->mode.refresh);
+ else
+ fb->panel->bpp = 32;
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ fb->panel->cntl |= CNTL_BEBO;
+#endif
+ fb->panel->width = -1;
+ fb->panel->height = -1;
+
+ if (of_property_read_u32_array(endpoint,
+ "arm,pl11x,tft-r0g0b0-pads",
+ tft_r0b0g0, ARRAY_SIZE(tft_r0b0g0)) == 0)
+ return clcdfb_of_init_tft_panel(fb, tft_r0b0g0[0],
+ tft_r0b0g0[1], tft_r0b0g0[2]);
+
+ return -ENOENT;
+}
+
+static int clcdfb_of_vram_setup(struct clcd_fb *fb)
+{
+ int err;
+ struct device_node *memory;
+ u64 size;
+
+ err = clcdfb_of_init_display(fb);
+ if (err)
+ return err;
+
+ memory = of_parse_phandle(fb->dev->dev.of_node, "memory-region", 0);
+ if (!memory)
+ return -ENODEV;
+
+ fb->fb.screen_base = of_iomap(memory, 0);
+ if (!fb->fb.screen_base)
+ return -ENOMEM;
+
+ fb->fb.fix.smem_start = of_translate_address(memory,
+ of_get_address(memory, 0, &size, NULL));
+ fb->fb.fix.smem_len = size;
+
+ return 0;
+}
+
+static int clcdfb_of_vram_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
+{
+ unsigned long off, user_size, kernel_size;
+
+ off = vma->vm_pgoff << PAGE_SHIFT;
+ user_size = vma->vm_end - vma->vm_start;
+ kernel_size = fb->fb.fix.smem_len;
+
+ if (off >= kernel_size || user_size > (kernel_size - off))
+ return -ENXIO;
+
+ return remap_pfn_range(vma, vma->vm_start,
+ __phys_to_pfn(fb->fb.fix.smem_start) + vma->vm_pgoff,
+ user_size,
+ pgprot_writecombine(vma->vm_page_prot));
+}
+
+static void clcdfb_of_vram_remove(struct clcd_fb *fb)
+{
+ iounmap(fb->fb.screen_base);
+}
+
+static int clcdfb_of_dma_setup(struct clcd_fb *fb)
+{
+ unsigned long framesize;
+ dma_addr_t dma;
+ int err;
+
+ err = clcdfb_of_init_display(fb);
+ if (err)
+ return err;
+
+ framesize = fb->panel->mode.xres * fb->panel->mode.yres *
+ fb->panel->bpp / 8;
+ fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize,
+ &dma, GFP_KERNEL);
+ if (!fb->fb.screen_base)
+ return -ENOMEM;
+
+ fb->fb.fix.smem_start = dma;
+ fb->fb.fix.smem_len = framesize;
+
+ return 0;
+}
+
+static int clcdfb_of_dma_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
+{
+ return dma_mmap_writecombine(&fb->dev->dev, vma, fb->fb.screen_base,
+ fb->fb.fix.smem_start, fb->fb.fix.smem_len);
+}
+
+static void clcdfb_of_dma_remove(struct clcd_fb *fb)
+{
+ dma_free_coherent(&fb->dev->dev, fb->fb.fix.smem_len,
+ fb->fb.screen_base, fb->fb.fix.smem_start);
+}
+
+static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev)
+{
+ struct clcd_board *board = devm_kzalloc(&dev->dev, sizeof(*board),
+ GFP_KERNEL);
+ struct device_node *node = dev->dev.of_node;
+
+ if (!board)
+ return NULL;
+
+ board->name = of_node_full_name(node);
+ board->caps = CLCD_CAP_ALL;
+ board->check = clcdfb_check;
+ board->decode = clcdfb_decode;
+ if (of_find_property(node, "memory-region", NULL)) {
+ board->setup = clcdfb_of_vram_setup;
+ board->mmap = clcdfb_of_vram_mmap;
+ board->remove = clcdfb_of_vram_remove;
+ } else {
+ board->setup = clcdfb_of_dma_setup;
+ board->mmap = clcdfb_of_dma_mmap;
+ board->remove = clcdfb_of_dma_remove;
+ }
+
+ return board;
+}
+#else
+static struct clcd_board *clcdfb_of_get_board(struct amba_device *dev)
+{
+ return NULL;
+}
+#endif
+
static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct clcd_board *board = dev_get_platdata(&dev->dev);
@@ -550,6 +810,9 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
int ret;
if (!board)
+ board = clcdfb_of_get_board(dev);
+
+ if (!board)
return -EINVAL;
ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index d36e830d6fc6..92640d46770a 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -290,7 +290,7 @@ static void init_contrast(struct atmel_lcdfb_info *sinfo)
/* contrast pwm can be 'inverted' */
if (pdata->lcdcon_pol_negative)
- contrast_ctr &= ~(ATMEL_LCDC_POL_POSITIVE);
+ contrast_ctr &= ~(ATMEL_LCDC_POL_POSITIVE);
/* have some default contrast/backlight settings */
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
@@ -1097,6 +1097,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
pdata->lcd_wiring_mode = ret;
pdata->lcdcon_is_backlight = of_property_read_bool(display_np, "atmel,lcdcon-backlight");
+ pdata->lcdcon_pol_negative = of_property_read_bool(display_np, "atmel,lcdcon-backlight-inverted");
timings = of_get_display_timings(display_np);
if (!timings) {
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index 52108be69e77..ff6070170d01 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -1802,13 +1802,7 @@ static int aty128_bl_update_status(struct backlight_device *bd)
return 0;
}
-static int aty128_bl_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static const struct backlight_ops aty128_bl_data = {
- .get_brightness = aty128_bl_get_brightness,
.update_status = aty128_bl_update_status,
};
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index c3d0074a32db..37ec09b3fffd 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2211,13 +2211,7 @@ static int aty_bl_update_status(struct backlight_device *bd)
return 0;
}
-static int aty_bl_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static const struct backlight_ops aty_bl_data = {
- .get_brightness = aty_bl_get_brightness,
.update_status = aty_bl_update_status,
};
diff --git a/drivers/video/fbdev/aty/radeon_backlight.c b/drivers/video/fbdev/aty/radeon_backlight.c
index db572df7e1ef..301d6d6aeead 100644
--- a/drivers/video/fbdev/aty/radeon_backlight.c
+++ b/drivers/video/fbdev/aty/radeon_backlight.c
@@ -123,13 +123,7 @@ static int radeon_bl_update_status(struct backlight_device *bd)
return 0;
}
-static int radeon_bl_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static const struct backlight_ops radeon_bl_data = {
- .get_brightness = radeon_bl_get_brightness,
.update_status = radeon_bl_update_status,
};
diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c
index 372d4aea9d1c..0676746ec68c 100644
--- a/drivers/video/fbdev/au1100fb.c
+++ b/drivers/video/fbdev/au1100fb.c
@@ -41,6 +41,7 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -113,7 +114,7 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
case VESA_NO_BLANKING:
/* Turn on panel */
fbdev->regs->lcd_control |= LCD_CONTROL_GO;
- au_sync();
+ wmb(); /* drain writebuffer */
break;
case VESA_VSYNC_SUSPEND:
@@ -121,7 +122,7 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi)
case VESA_POWERDOWN:
/* Turn off panel */
fbdev->regs->lcd_control &= ~LCD_CONTROL_GO;
- au_sync();
+ wmb(); /* drain writebuffer */
break;
default:
break;
@@ -434,7 +435,7 @@ static int au1100fb_drv_probe(struct platform_device *dev)
struct au1100fb_device *fbdev = NULL;
struct resource *regs_res;
unsigned long page;
- u32 sys_clksrc;
+ struct clk *c;
/* Allocate new device private */
fbdev = devm_kzalloc(&dev->dev, sizeof(struct au1100fb_device),
@@ -473,6 +474,13 @@ static int au1100fb_drv_probe(struct platform_device *dev)
print_dbg("Register memory map at %p", fbdev->regs);
print_dbg("phys=0x%08x, size=%d", fbdev->regs_phys, fbdev->regs_len);
+ c = clk_get(NULL, "lcd_intclk");
+ if (!IS_ERR(c)) {
+ fbdev->lcdclk = c;
+ clk_set_rate(c, 48000000);
+ clk_prepare_enable(c);
+ }
+
/* Allocate the framebuffer to the maximum screen size * nbr of video buffers */
fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
@@ -506,10 +514,6 @@ static int au1100fb_drv_probe(struct platform_device *dev)
print_dbg("Framebuffer memory map at %p", fbdev->fb_mem);
print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024);
- /* Setup LCD clock to AUX (48 MHz) */
- sys_clksrc = au_readl(SYS_CLKSRC) & ~(SYS_CS_ML_MASK | SYS_CS_DL | SYS_CS_CL);
- au_writel((sys_clksrc | (1 << SYS_CS_ML_BIT)), SYS_CLKSRC);
-
/* load the panel info into the var struct */
au1100fb_var.bits_per_pixel = fbdev->panel->bpp;
au1100fb_var.xres = fbdev->panel->xres;
@@ -546,6 +550,10 @@ static int au1100fb_drv_probe(struct platform_device *dev)
return 0;
failed:
+ if (fbdev->lcdclk) {
+ clk_disable_unprepare(fbdev->lcdclk);
+ clk_put(fbdev->lcdclk);
+ }
if (fbdev->fb_mem) {
dma_free_noncoherent(&dev->dev, fbdev->fb_len, fbdev->fb_mem,
fbdev->fb_phys);
@@ -576,11 +584,15 @@ int au1100fb_drv_remove(struct platform_device *dev)
fb_dealloc_cmap(&fbdev->info.cmap);
+ if (fbdev->lcdclk) {
+ clk_disable_unprepare(fbdev->lcdclk);
+ clk_put(fbdev->lcdclk);
+ }
+
return 0;
}
#ifdef CONFIG_PM
-static u32 sys_clksrc;
static struct au1100fb_regs fbregs;
int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state)
@@ -590,14 +602,11 @@ int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state)
if (!fbdev)
return 0;
- /* Save the clock source state */
- sys_clksrc = au_readl(SYS_CLKSRC);
-
/* Blank the LCD */
au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info);
- /* Stop LCD clocking */
- au_writel(sys_clksrc & ~SYS_CS_ML_MASK, SYS_CLKSRC);
+ if (fbdev->lcdclk)
+ clk_disable(fbdev->lcdclk);
memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs));
@@ -613,8 +622,8 @@ int au1100fb_drv_resume(struct platform_device *dev)
memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs));
- /* Restart LCD clocking */
- au_writel(sys_clksrc, SYS_CLKSRC);
+ if (fbdev->lcdclk)
+ clk_enable(fbdev->lcdclk);
/* Unblank the LCD */
au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info);
diff --git a/drivers/video/fbdev/au1100fb.h b/drivers/video/fbdev/au1100fb.h
index 12d9642d5465..9af19939a9c6 100644
--- a/drivers/video/fbdev/au1100fb.h
+++ b/drivers/video/fbdev/au1100fb.h
@@ -109,6 +109,7 @@ struct au1100fb_device {
size_t fb_len;
dma_addr_t fb_phys;
int panel_idx;
+ struct clk *lcdclk;
};
/********************************************************************/
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index 4cfba78a1458..40494dbdf519 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -30,6 +30,7 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
@@ -330,9 +331,8 @@ struct panel_settings
uint32 mode_pwmhi;
uint32 mode_outmask;
uint32 mode_fifoctrl;
- uint32 mode_toyclksrc;
uint32 mode_backlight;
- uint32 mode_auxpll;
+ uint32 lcdclk;
#define Xres min_xres
#define Yres min_yres
u32 min_xres; /* Minimum horizontal resolution */
@@ -379,9 +379,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = 8, /* 96MHz AUXPLL */
+ .lcdclk = 96,
320, 320,
240, 240,
},
@@ -407,9 +406,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = 8, /* 96MHz AUXPLL */
+ .lcdclk = 96,
640, 480,
640, 480,
},
@@ -435,9 +433,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = 8, /* 96MHz AUXPLL */
+ .lcdclk = 96,
800, 800,
600, 600,
},
@@ -463,9 +460,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = 6, /* 72MHz AUXPLL */
+ .lcdclk = 72,
1024, 1024,
768, 768,
},
@@ -491,9 +487,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = 10, /* 120MHz AUXPLL */
+ .lcdclk = 120,
1280, 1280,
1024, 1024,
},
@@ -519,9 +514,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x03400000, /* SCB 0x0 */
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = 8, /* 96MHz AUXPLL */
+ .lcdclk = 96,
1024, 1024,
768, 768,
},
@@ -550,9 +544,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x03400000,
.mode_outmask = 0x00fcfcfc,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = 8, /* 96MHz AUXPLL */
+ .lcdclk = 96,
640, 480,
640, 480,
},
@@ -581,9 +574,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x03400000,
.mode_outmask = 0x00fcfcfc,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = 8, /* 96MHz AUXPLL */
+ .lcdclk = 96, /* 96MHz AUXPLL */
320, 320,
240, 240,
},
@@ -612,9 +604,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x03400000,
.mode_outmask = 0x00fcfcfc,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = 8, /* 96MHz AUXPLL */
+ .lcdclk = 96,
856, 856,
480, 480,
},
@@ -646,9 +637,8 @@ static struct panel_settings known_lcd_panels[] =
.mode_pwmhi = 0x00000000,
.mode_outmask = 0x00FFFFFF,
.mode_fifoctrl = 0x2f2f2f2f,
- .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
- .mode_auxpll = (48/12) * 2,
+ .lcdclk = 96,
800, 800,
480, 480,
},
@@ -764,7 +754,7 @@ static int au1200_setlocation (struct au1200fb_device *fbdev, int plane,
/* Disable the window while making changes, then restore WINEN */
winenable = lcd->winenable & (1 << plane);
- au_sync();
+ wmb(); /* drain writebuffer */
lcd->winenable &= ~(1 << plane);
lcd->window[plane].winctrl0 = winctrl0;
lcd->window[plane].winctrl1 = winctrl1;
@@ -772,7 +762,7 @@ static int au1200_setlocation (struct au1200fb_device *fbdev, int plane,
lcd->window[plane].winbuf1 = fbdev->fb_phys;
lcd->window[plane].winbufctrl = 0; /* select winbuf0 */
lcd->winenable |= winenable;
- au_sync();
+ wmb(); /* drain writebuffer */
return 0;
}
@@ -788,22 +778,21 @@ static void au1200_setpanel(struct panel_settings *newpanel,
/* Make sure all windows disabled */
winenable = lcd->winenable;
lcd->winenable = 0;
- au_sync();
+ wmb(); /* drain writebuffer */
/*
* Ensure everything is disabled before reconfiguring
*/
if (lcd->screen & LCD_SCREEN_SEN) {
/* Wait for vertical sync period */
lcd->intstatus = LCD_INT_SS;
- while ((lcd->intstatus & LCD_INT_SS) == 0) {
- au_sync();
- }
+ while ((lcd->intstatus & LCD_INT_SS) == 0)
+ ;
lcd->screen &= ~LCD_SCREEN_SEN; /*disable the controller*/
do {
lcd->intstatus = lcd->intstatus; /*clear interrupts*/
- au_sync();
+ wmb(); /* drain writebuffer */
/*wait for controller to shut down*/
} while ((lcd->intstatus & LCD_INT_SD) == 0);
@@ -829,11 +818,17 @@ static void au1200_setpanel(struct panel_settings *newpanel,
*/
if (!(panel->mode_clkcontrol & LCD_CLKCONTROL_EXT))
{
- uint32 sys_clksrc;
- au_writel(panel->mode_auxpll, SYS_AUXPLL);
- sys_clksrc = au_readl(SYS_CLKSRC) & ~0x0000001f;
- sys_clksrc |= panel->mode_toyclksrc;
- au_writel(sys_clksrc, SYS_CLKSRC);
+ struct clk *c = clk_get(NULL, "lcd_intclk");
+ long r, pc = panel->lcdclk * 1000000;
+
+ if (!IS_ERR(c)) {
+ r = clk_round_rate(c, pc);
+ if ((pc - r) < (pc / 10)) { /* 10% slack */
+ clk_set_rate(c, r);
+ clk_prepare_enable(c);
+ }
+ clk_put(c);
+ }
}
/*
@@ -847,7 +842,7 @@ static void au1200_setpanel(struct panel_settings *newpanel,
lcd->pwmhi = panel->mode_pwmhi;
lcd->outmask = panel->mode_outmask;
lcd->fifoctrl = panel->mode_fifoctrl;
- au_sync();
+ wmb(); /* drain writebuffer */
/* fixme: Check window settings to make sure still valid
* for new geometry */
@@ -863,7 +858,7 @@ static void au1200_setpanel(struct panel_settings *newpanel,
* Re-enable screen now that it is configured
*/
lcd->screen |= LCD_SCREEN_SEN;
- au_sync();
+ wmb(); /* drain writebuffer */
/* Call init of panel */
if (pd->panel_init)
@@ -956,7 +951,7 @@ static void au1200_setmode(struct au1200fb_device *fbdev)
| LCD_WINCTRL2_SCY_1
) ;
lcd->winenable |= win->w[plane].mode_winenable;
- au_sync();
+ wmb(); /* drain writebuffer */
}
@@ -1270,7 +1265,7 @@ static void set_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
if (pdata->flags & SCREEN_MASK)
lcd->colorkeymsk = pdata->mask;
- au_sync();
+ wmb(); /* drain writebuffer */
}
static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
@@ -1288,7 +1283,7 @@ static void get_global(u_int cmd, struct au1200_lcd_global_regs_t *pdata)
hi1 = (lcd->pwmhi >> 16) + 1;
divider = (lcd->pwmdiv & 0x3FFFF) + 1;
pdata->brightness = ((hi1 << 8) / divider) - 1;
- au_sync();
+ wmb(); /* drain writebuffer */
}
static void set_window(unsigned int plane,
@@ -1387,7 +1382,7 @@ static void set_window(unsigned int plane,
val |= (pdata->enable & 1) << plane;
lcd->winenable = val;
}
- au_sync();
+ wmb(); /* drain writebuffer */
}
static void get_window(unsigned int plane,
@@ -1414,7 +1409,7 @@ static void get_window(unsigned int plane,
pdata->ram_array_mode = (lcd->window[plane].winctrl2 & LCD_WINCTRL2_RAM) >> 21;
pdata->enable = (lcd->winenable >> plane) & 1;
- au_sync();
+ wmb(); /* drain writebuffer */
}
static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
@@ -1511,7 +1506,7 @@ static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id)
{
/* Nothing to do for now, just clear any pending interrupt */
lcd->intstatus = lcd->intstatus;
- au_sync();
+ wmb(); /* drain writebuffer */
return IRQ_HANDLED;
}
@@ -1809,7 +1804,7 @@ static int au1200fb_drv_suspend(struct device *dev)
au1200_setpanel(NULL, pd);
lcd->outmask = 0;
- au_sync();
+ wmb(); /* drain writebuffer */
return 0;
}
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
new file mode 100644
index 000000000000..49a7bb4ef02f
--- /dev/null
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -0,0 +1,397 @@
+/*
+ * Cirrus Logic CLPS711X FB driver
+ *
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ * Based on driver by Russell King <rmk@arm.linux.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/lcd.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
+#include <linux/regulator/consumer.h>
+#include <video/of_display_timing.h>
+
+#define CLPS711X_FB_NAME "clps711x-fb"
+#define CLPS711X_FB_BPP_MAX (4)
+
+/* Registers relative to LCDCON */
+#define CLPS711X_LCDCON (0x0000)
+# define LCDCON_GSEN BIT(30)
+# define LCDCON_GSMD BIT(31)
+#define CLPS711X_PALLSW (0x0280)
+#define CLPS711X_PALMSW (0x02c0)
+#define CLPS711X_FBADDR (0x0d40)
+
+struct clps711x_fb_info {
+ struct clk *clk;
+ void __iomem *base;
+ struct regmap *syscon;
+ resource_size_t buffsize;
+ struct fb_videomode mode;
+ struct regulator *lcd_pwr;
+ u32 ac_prescale;
+ bool cmap_invert;
+};
+
+static int clps711x_fb_setcolreg(u_int regno, u_int red, u_int green,
+ u_int blue, u_int transp, struct fb_info *info)
+{
+ struct clps711x_fb_info *cfb = info->par;
+ u32 level, mask, shift;
+
+ if (regno >= BIT(info->var.bits_per_pixel))
+ return -EINVAL;
+
+ shift = 4 * (regno & 7);
+ mask = 0xf << shift;
+ /* gray = 0.30*R + 0.58*G + 0.11*B */
+ level = (((red * 77 + green * 151 + blue * 28) >> 20) << shift) & mask;
+ if (cfb->cmap_invert)
+ level = 0xf - level;
+
+ regno = (regno < 8) ? CLPS711X_PALLSW : CLPS711X_PALMSW;
+
+ writel((readl(cfb->base + regno) & ~mask) | level, cfb->base + regno);
+
+ return 0;
+}
+
+static int clps711x_fb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ u32 val;
+
+ if (var->bits_per_pixel < 1 ||
+ var->bits_per_pixel > CLPS711X_FB_BPP_MAX)
+ return -EINVAL;
+
+ if (!var->pixclock)
+ return -EINVAL;
+
+ val = DIV_ROUND_UP(var->xres, 16) - 1;
+ if (val < 0x01 || val > 0x3f)
+ return -EINVAL;
+
+ val = DIV_ROUND_UP(var->yres * var->xres * var->bits_per_pixel, 128);
+ val--;
+ if (val < 0x001 || val > 0x1fff)
+ return -EINVAL;
+
+ var->transp.msb_right = 0;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->red.msb_right = 0;
+ var->red.offset = 0;
+ var->red.length = var->bits_per_pixel;
+ var->green = var->red;
+ var->blue = var->red;
+ var->grayscale = var->bits_per_pixel > 1;
+
+ return 0;
+}
+
+static int clps711x_fb_set_par(struct fb_info *info)
+{
+ struct clps711x_fb_info *cfb = info->par;
+ resource_size_t size;
+ u32 lcdcon, pps;
+
+ size = (info->var.xres * info->var.yres * info->var.bits_per_pixel) / 8;
+ if (size > cfb->buffsize)
+ return -EINVAL;
+
+ switch (info->var.bits_per_pixel) {
+ case 1:
+ info->fix.visual = FB_VISUAL_MONO01;
+ break;
+ case 2:
+ case 4:
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ info->fix.line_length = info->var.xres * info->var.bits_per_pixel / 8;
+ info->fix.smem_len = size;
+
+ lcdcon = (info->var.xres * info->var.yres *
+ info->var.bits_per_pixel) / 128 - 1;
+ lcdcon |= ((info->var.xres / 16) - 1) << 13;
+ lcdcon |= (cfb->ac_prescale & 0x1f) << 25;
+
+ pps = clk_get_rate(cfb->clk) / (PICOS2KHZ(info->var.pixclock) * 1000);
+ if (pps)
+ pps--;
+ lcdcon |= (pps & 0x3f) << 19;
+
+ if (info->var.bits_per_pixel == 4)
+ lcdcon |= LCDCON_GSMD;
+ if (info->var.bits_per_pixel >= 2)
+ lcdcon |= LCDCON_GSEN;
+
+ /* LCDCON must only be changed while the LCD is disabled */
+ regmap_update_bits(cfb->syscon, SYSCON_OFFSET, SYSCON1_LCDEN, 0);
+ writel(lcdcon, cfb->base + CLPS711X_LCDCON);
+ regmap_update_bits(cfb->syscon, SYSCON_OFFSET,
+ SYSCON1_LCDEN, SYSCON1_LCDEN);
+
+ return 0;
+}
+
+static int clps711x_fb_blank(int blank, struct fb_info *info)
+{
+ /* Return happy */
+ return 0;
+}
+
+static struct fb_ops clps711x_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_setcolreg = clps711x_fb_setcolreg,
+ .fb_check_var = clps711x_fb_check_var,
+ .fb_set_par = clps711x_fb_set_par,
+ .fb_blank = clps711x_fb_blank,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+};
+
+static int clps711x_lcd_check_fb(struct lcd_device *lcddev, struct fb_info *fi)
+{
+ struct clps711x_fb_info *cfb = dev_get_drvdata(&lcddev->dev);
+
+ return (!fi || fi->par == cfb) ? 1 : 0;
+}
+
+static int clps711x_lcd_get_power(struct lcd_device *lcddev)
+{
+ struct clps711x_fb_info *cfb = dev_get_drvdata(&lcddev->dev);
+
+ if (!IS_ERR_OR_NULL(cfb->lcd_pwr))
+ if (!regulator_is_enabled(cfb->lcd_pwr))
+ return FB_BLANK_NORMAL;
+
+ return FB_BLANK_UNBLANK;
+}
+
+static int clps711x_lcd_set_power(struct lcd_device *lcddev, int blank)
+{
+ struct clps711x_fb_info *cfb = dev_get_drvdata(&lcddev->dev);
+
+ if (!IS_ERR_OR_NULL(cfb->lcd_pwr)) {
+ if (blank == FB_BLANK_UNBLANK) {
+ if (!regulator_is_enabled(cfb->lcd_pwr))
+ return regulator_enable(cfb->lcd_pwr);
+ } else {
+ if (regulator_is_enabled(cfb->lcd_pwr))
+ return regulator_disable(cfb->lcd_pwr);
+ }
+ }
+
+ return 0;
+}
+
+static struct lcd_ops clps711x_lcd_ops = {
+ .check_fb = clps711x_lcd_check_fb,
+ .get_power = clps711x_lcd_get_power,
+ .set_power = clps711x_lcd_set_power,
+};
+
+static int clps711x_fb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *disp, *np = dev->of_node;
+ struct clps711x_fb_info *cfb;
+ struct lcd_device *lcd;
+ struct fb_info *info;
+ struct resource *res;
+ int ret = -ENOENT;
+ u32 val;
+
+ if (fb_get_options(CLPS711X_FB_NAME, NULL))
+ return -ENODEV;
+
+ info = framebuffer_alloc(sizeof(*cfb), dev);
+ if (!info)
+ return -ENOMEM;
+
+ cfb = info->par;
+ platform_set_drvdata(pdev, info);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ goto out_fb_release;
+ cfb->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!cfb->base) {
+ ret = -ENOMEM;
+ goto out_fb_release;
+ }
+
+ info->fix.mmio_start = res->start;
+ info->fix.mmio_len = resource_size(res);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ info->screen_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(info->screen_base)) {
+ ret = PTR_ERR(info->screen_base);
+ goto out_fb_release;
+ }
+
+ /* Physical address should be aligned to 256 MiB */
+ if (res->start & 0x0fffffff) {
+ ret = -EINVAL;
+ goto out_fb_release;
+ }
+
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ ret = -ENOMEM;
+ goto out_fb_release;
+ }
+
+ cfb->buffsize = resource_size(res);
+ info->fix.smem_start = res->start;
+ info->apertures->ranges[0].base = info->fix.smem_start;
+ info->apertures->ranges[0].size = cfb->buffsize;
+
+ cfb->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(cfb->clk)) {
+ ret = PTR_ERR(cfb->clk);
+ goto out_fb_release;
+ }
+
+ cfb->syscon =
+ syscon_regmap_lookup_by_compatible("cirrus,clps711x-syscon1");
+ if (IS_ERR(cfb->syscon)) {
+ ret = PTR_ERR(cfb->syscon);
+ goto out_fb_release;
+ }
+
+ disp = of_parse_phandle(np, "display", 0);
+ if (!disp) {
+ dev_err(&pdev->dev, "No display defined\n");
+ ret = -ENODATA;
+ goto out_fb_release;
+ }
+
+ ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE);
+ if (ret)
+ goto out_fb_release;
+
+ of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale);
+ cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert");
+
+ ret = of_property_read_u32(disp, "bits-per-pixel",
+ &info->var.bits_per_pixel);
+ if (ret)
+ goto out_fb_release;
+
+ /* Force disable LCD on any mismatch */
+ if (info->fix.smem_start != (readb(cfb->base + CLPS711X_FBADDR) << 28))
+ regmap_update_bits(cfb->syscon, SYSCON_OFFSET,
+ SYSCON1_LCDEN, 0);
+
+ ret = regmap_read(cfb->syscon, SYSCON_OFFSET, &val);
+ if (ret)
+ goto out_fb_release;
+
+ if (!(val & SYSCON1_LCDEN)) {
+ /* Setup start FB address */
+ writeb(info->fix.smem_start >> 28, cfb->base + CLPS711X_FBADDR);
+ /* Clean FB memory */
+ memset_io(info->screen_base, 0, cfb->buffsize);
+ }
+
+ cfb->lcd_pwr = devm_regulator_get(dev, "lcd");
+ if (PTR_ERR(cfb->lcd_pwr) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_fb_release;
+ }
+
+ info->fbops = &clps711x_fb_ops;
+ info->flags = FBINFO_DEFAULT;
+ info->var.activate = FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
+ info->var.height = -1;
+ info->var.width = -1;
+ info->var.vmode = FB_VMODE_NONINTERLACED;
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.accel = FB_ACCEL_NONE;
+ strlcpy(info->fix.id, CLPS711X_FB_NAME, sizeof(info->fix.id));
+ fb_videomode_to_var(&info->var, &cfb->mode);
+
+ ret = fb_alloc_cmap(&info->cmap, BIT(CLPS711X_FB_BPP_MAX), 0);
+ if (ret)
+ goto out_fb_release;
+
+ ret = fb_set_var(info, &info->var);
+ if (ret)
+ goto out_fb_dealloc_cmap;
+
+ ret = register_framebuffer(info);
+ if (ret)
+ goto out_fb_dealloc_cmap;
+
+ lcd = devm_lcd_device_register(dev, "clps711x-lcd", dev, cfb,
+ &clps711x_lcd_ops);
+ if (!IS_ERR(lcd))
+ return 0;
+
+ ret = PTR_ERR(lcd);
+ unregister_framebuffer(info);
+
+out_fb_dealloc_cmap:
+ regmap_update_bits(cfb->syscon, SYSCON_OFFSET, SYSCON1_LCDEN, 0);
+ fb_dealloc_cmap(&info->cmap);
+
+out_fb_release:
+ framebuffer_release(info);
+
+ return ret;
+}
+
+static int clps711x_fb_remove(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct clps711x_fb_info *cfb = info->par;
+
+ regmap_update_bits(cfb->syscon, SYSCON_OFFSET, SYSCON1_LCDEN, 0);
+
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+
+ return 0;
+}
+
+static const struct of_device_id clps711x_fb_dt_ids[] = {
+ { .compatible = "cirrus,clps711x-fb", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clps711x_fb_dt_ids);
+
+static struct platform_driver clps711x_fb_driver = {
+ .driver = {
+ .name = CLPS711X_FB_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = clps711x_fb_dt_ids,
+ },
+ .probe = clps711x_fb_probe,
+ .remove = clps711x_fb_remove,
+};
+module_platform_driver(clps711x_fb_driver);
+
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("Cirrus Logic CLPS711X FB driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index a8484f768d04..788f6b37fce7 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -1447,18 +1447,15 @@ static int fb_probe(struct platform_device *device)
da8xx_fb_fix.line_length - 1;
/* allocate palette buffer */
- par->v_palette_base = dma_alloc_coherent(NULL,
- PALETTE_SIZE,
- (resource_size_t *)
- &par->p_palette_base,
- GFP_KERNEL | GFP_DMA);
+ par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
+ (resource_size_t *)&par->p_palette_base,
+ GFP_KERNEL | GFP_DMA);
if (!par->v_palette_base) {
dev_err(&device->dev,
"GLCD: kmalloc for palette buffer failed\n");
ret = -EINVAL;
goto err_release_fb_mem;
}
- memset(par->v_palette_base, 0, PALETTE_SIZE);
par->irq = platform_get_irq(device, 0);
if (par->irq < 0) {
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
index 29e70ed3f154..95873f26e39c 100644
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ b/drivers/video/fbdev/exynos/s6e8ax0.c
@@ -704,11 +704,6 @@ static int s6e8ax0_get_power(struct lcd_device *ld)
return lcd->power;
}
-static int s6e8ax0_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static int s6e8ax0_set_brightness(struct backlight_device *bd)
{
int ret = 0, brightness = bd->props.brightness;
@@ -736,7 +731,6 @@ static struct lcd_ops s6e8ax0_lcd_ops = {
};
static const struct backlight_ops s6e8ax0_backlight_ops = {
- .get_brightness = s6e8ax0_get_brightness,
.update_status = s6e8ax0_set_brightness,
};
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index e23392ec5af3..569e7562fa3d 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -224,6 +224,11 @@ struct hvfb_par {
u32 pseudo_palette[16];
u8 init_buf[MAX_VMBUS_PKT_SIZE];
u8 recv_buf[MAX_VMBUS_PKT_SIZE];
+
+ /* If true, the VSC notifies the VSP on every framebuffer change */
+ bool synchronous_fb;
+
+ struct notifier_block hvfb_panic_nb;
};
static uint screen_width = HVFB_WIDTH;
@@ -532,6 +537,19 @@ static void hvfb_update_work(struct work_struct *w)
schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
}
+static int hvfb_on_panic(struct notifier_block *nb,
+ unsigned long e, void *p)
+{
+ struct hvfb_par *par;
+ struct fb_info *info;
+
+ par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
+ par->synchronous_fb = true;
+ info = par->info;
+ synthvid_update(info);
+
+ return NOTIFY_DONE;
+}
/* Framebuffer operation handlers */
@@ -582,14 +600,44 @@ static int hvfb_blank(int blank, struct fb_info *info)
return 1; /* get fb_blank to set the colormap to all black */
}
+static void hvfb_cfb_fillrect(struct fb_info *p,
+ const struct fb_fillrect *rect)
+{
+ struct hvfb_par *par = p->par;
+
+ cfb_fillrect(p, rect);
+ if (par->synchronous_fb)
+ synthvid_update(p);
+}
+
+static void hvfb_cfb_copyarea(struct fb_info *p,
+ const struct fb_copyarea *area)
+{
+ struct hvfb_par *par = p->par;
+
+ cfb_copyarea(p, area);
+ if (par->synchronous_fb)
+ synthvid_update(p);
+}
+
+static void hvfb_cfb_imageblit(struct fb_info *p,
+ const struct fb_image *image)
+{
+ struct hvfb_par *par = p->par;
+
+ cfb_imageblit(p, image);
+ if (par->synchronous_fb)
+ synthvid_update(p);
+}
+
static struct fb_ops hvfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = hvfb_check_var,
.fb_set_par = hvfb_set_par,
.fb_setcolreg = hvfb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
+ .fb_fillrect = hvfb_cfb_fillrect,
+ .fb_copyarea = hvfb_cfb_copyarea,
+ .fb_imageblit = hvfb_cfb_imageblit,
.fb_blank = hvfb_blank,
};
@@ -801,6 +849,11 @@ static int hvfb_probe(struct hv_device *hdev,
par->fb_ready = true;
+ par->synchronous_fb = false;
+ par->hvfb_panic_nb.notifier_call = hvfb_on_panic;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &par->hvfb_panic_nb);
+
return 0;
error:
@@ -820,6 +873,9 @@ static int hvfb_remove(struct hv_device *hdev)
struct fb_info *info = hv_get_drvdata(hdev);
struct hvfb_par *par = info->par;
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &par->hvfb_panic_nb);
+
par->update = false;
par->fb_ready = false;
diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
index 2bd52ed8832c..698df9543e30 100644
--- a/drivers/video/fbdev/mbx/mbxfb.c
+++ b/drivers/video/fbdev/mbx/mbxfb.c
@@ -628,14 +628,14 @@ static int mbxfb_ioctl(struct fb_info *info, unsigned int cmd,
case MBXFB_IOCS_PLANEORDER:
if (copy_from_user(&porder, (void __user*)arg,
sizeof(struct mbxfb_planeorder)))
- return -EFAULT;
+ return -EFAULT;
return mbxfb_ioctl_planeorder(&porder);
case MBXFB_IOCS_ALPHA:
if (copy_from_user(&alpha, (void __user*)arg,
sizeof(struct mbxfb_alphaCtl)))
- return -EFAULT;
+ return -EFAULT;
return mbxfb_ioctl_alphactl(&alpha);
diff --git a/drivers/video/fbdev/msm/mddi_client_dummy.c b/drivers/video/fbdev/msm/mddi_client_dummy.c
index f1b0dfcc9717..cdb8f69a5d88 100644
--- a/drivers/video/fbdev/msm/mddi_client_dummy.c
+++ b/drivers/video/fbdev/msm/mddi_client_dummy.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
+#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -51,8 +52,7 @@ static int mddi_dummy_probe(struct platform_device *pdev)
{
struct msm_mddi_client_data *client_data = pdev->dev.platform_data;
struct panel_info *panel =
- kzalloc(sizeof(struct panel_info), GFP_KERNEL);
- int ret;
+ devm_kzalloc(&pdev->dev, sizeof(struct panel_info), GFP_KERNEL);
if (!panel)
return -ENOMEM;
platform_set_drvdata(pdev, panel);
@@ -67,24 +67,11 @@ static int mddi_dummy_probe(struct platform_device *pdev)
client_data->fb_resource, 1);
panel->panel_data.fb_data = client_data->private_client_data;
panel->pdev.dev.platform_data = &panel->panel_data;
- ret = platform_device_register(&panel->pdev);
- if (ret) {
- kfree(panel);
- return ret;
- }
- return 0;
-}
-
-static int mddi_dummy_remove(struct platform_device *pdev)
-{
- struct panel_info *panel = platform_get_drvdata(pdev);
- kfree(panel);
- return 0;
+ return platform_device_register(&panel->pdev);
}
static struct platform_driver mddi_client_dummy = {
.probe = mddi_dummy_probe,
- .remove = mddi_dummy_remove,
.driver = { .name = "mddi_c_dummy" },
};
diff --git a/drivers/video/fbdev/nvidia/nv_backlight.c b/drivers/video/fbdev/nvidia/nv_backlight.c
index 8471008aa6ff..5c151b2ea683 100644
--- a/drivers/video/fbdev/nvidia/nv_backlight.c
+++ b/drivers/video/fbdev/nvidia/nv_backlight.c
@@ -82,13 +82,7 @@ static int nvidia_bl_update_status(struct backlight_device *bd)
return 0;
}
-static int nvidia_bl_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static const struct backlight_ops nvidia_bl_ops = {
- .get_brightness = nvidia_bl_get_brightness,
.update_status = nvidia_bl_update_status,
};
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
index 4420ccb69aa9..131c6e260898 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
@@ -262,6 +262,23 @@ static int hdmic_audio_config(struct omap_dss_device *dssdev,
return 0;
}
+static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode);
+}
+
+static int hdmic_set_infoframe(struct omap_dss_device *dssdev,
+ const struct hdmi_avi_infoframe *avi)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->set_infoframe(in, avi);
+}
+
static struct omap_dss_driver hdmic_driver = {
.connect = hdmic_connect,
.disconnect = hdmic_disconnect,
@@ -277,6 +294,8 @@ static struct omap_dss_driver hdmic_driver = {
.read_edid = hdmic_read_edid,
.detect = hdmic_detect,
+ .set_hdmi_mode = hdmic_set_hdmi_mode,
+ .set_hdmi_infoframe = hdmic_set_infoframe,
.audio_enable = hdmic_audio_enable,
.audio_disable = hdmic_audio_disable,
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
index 7e33686171e3..c891d8f84cb2 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
@@ -242,6 +242,24 @@ static int tpd_audio_config(struct omap_dss_device *dssdev,
return in->ops.hdmi->audio_config(in, audio);
}
+static int tpd_set_infoframe(struct omap_dss_device *dssdev,
+ const struct hdmi_avi_infoframe *avi)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->set_infoframe(in, avi);
+}
+
+static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev,
+ bool hdmi_mode)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct omap_dss_device *in = ddata->in;
+
+ return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode);
+}
+
static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
.connect = tpd_connect,
.disconnect = tpd_disconnect,
@@ -255,6 +273,8 @@ static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
.read_edid = tpd_read_edid,
.detect = tpd_detect,
+ .set_infoframe = tpd_set_infoframe,
+ .set_hdmi_mode = tpd_set_hdmi_mode,
.audio_enable = tpd_audio_enable,
.audio_disable = tpd_audio_disable,
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index c7ba4d8b928a..617f8d2f5127 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
@@ -817,6 +817,10 @@ static int acx565akm_probe(struct spi_device *spi)
bldev = backlight_device_register("acx565akm", &ddata->spi->dev,
ddata, &acx565akm_bl_ops, &props);
+ if (IS_ERR(bldev)) {
+ r = PTR_ERR(bldev);
+ goto err_reg_bl;
+ }
ddata->bl_dev = bldev;
if (ddata->has_cabc) {
r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
@@ -862,6 +866,7 @@ err_reg:
sysfs_remove_group(&bldev->dev.kobj, &bldev_attr_group);
err_sysfs:
backlight_device_unregister(bldev);
+err_reg_bl:
err_detect:
err_gpio:
omap_dss_put_device(ddata->in);
diff --git a/drivers/video/fbdev/omap2/dss/Kconfig b/drivers/video/fbdev/omap2/dss/Kconfig
index 285bcd103dce..3d5eb6c36c22 100644
--- a/drivers/video/fbdev/omap2/dss/Kconfig
+++ b/drivers/video/fbdev/omap2/dss/Kconfig
@@ -5,6 +5,7 @@ menuconfig OMAP2_DSS
tristate "OMAP2+ Display Subsystem support"
select VIDEOMODE_HELPERS
select OMAP2_DSS_INIT
+ select HDMI
help
OMAP2+ Display Subsystem support.
diff --git a/drivers/video/fbdev/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c
index 7aa33b0f4a1f..be053aa80880 100644
--- a/drivers/video/fbdev/omap2/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/dss/dispc.c
@@ -2879,19 +2879,24 @@ static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
bool dispc_mgr_timings_ok(enum omap_channel channel,
const struct omap_video_timings *timings)
{
- bool timings_ok;
-
- timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
+ if (!_dispc_mgr_size_ok(timings->x_res, timings->y_res))
+ return false;
- timings_ok &= _dispc_mgr_pclk_ok(channel, timings->pixelclock);
+ if (!_dispc_mgr_pclk_ok(channel, timings->pixelclock))
+ return false;
if (dss_mgr_is_lcd(channel)) {
- timings_ok &= _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
+ /* TODO: OMAP4+ supports interlace for LCD outputs */
+ if (timings->interlace)
+ return false;
+
+ if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
timings->hbp, timings->vsw, timings->vfp,
- timings->vbp);
+ timings->vbp))
+ return false;
}
- return timings_ok;
+ return true;
}
static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
@@ -3257,13 +3262,10 @@ static void dispc_dump_regs(struct seq_file *s)
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
- DUMPREG(i, DISPC_DEFAULT_COLOR);
- DUMPREG(i, DISPC_TRANS_COLOR);
DUMPREG(i, DISPC_TIMING_H);
DUMPREG(i, DISPC_TIMING_V);
DUMPREG(i, DISPC_POL_FREQ);
DUMPREG(i, DISPC_DIVISORo);
- DUMPREG(i, DISPC_SIZE_MGR);
DUMPREG(i, DISPC_DATA_CYCLE1);
DUMPREG(i, DISPC_DATA_CYCLE2);
diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c
index 4755a34a5422..56b92444c54f 100644
--- a/drivers/video/fbdev/omap2/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/dss/dsi.c
@@ -5658,18 +5658,11 @@ err_runtime_get:
return r;
}
-static int dsi_unregister_child(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- platform_device_unregister(pdev);
- return 0;
-}
-
static int __exit omap_dsihw_remove(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- device_for_each_child(&dsidev->dev, NULL, dsi_unregister_child);
+ of_platform_depopulate(&dsidev->dev);
WARN_ON(dsi->scp_clk_refcount > 0);
diff --git a/drivers/video/fbdev/omap2/dss/hdmi.h b/drivers/video/fbdev/omap2/dss/hdmi.h
index fbee07816337..262771b9b76b 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi.h
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/hdmi.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -142,7 +143,7 @@ enum hdmi_audio_samples_perword {
HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1
};
-enum hdmi_audio_sample_size {
+enum hdmi_audio_sample_size_omap {
HDMI_AUDIO_SAMPLE_16BITS = 0,
HDMI_AUDIO_SAMPLE_24BITS = 1
};
@@ -178,59 +179,6 @@ enum hdmi_audio_mclk_mode {
HDMI_AUDIO_MCLK_192FS = 7
};
-/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */
-enum hdmi_core_infoframe {
- HDMI_INFOFRAME_AVI_DB1Y_RGB = 0,
- HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1,
- HDMI_INFOFRAME_AVI_DB1Y_YUV444 = 2,
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF = 0,
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_ON = 1,
- HDMI_INFOFRAME_AVI_DB1B_NO = 0,
- HDMI_INFOFRAME_AVI_DB1B_VERT = 1,
- HDMI_INFOFRAME_AVI_DB1B_HORI = 2,
- HDMI_INFOFRAME_AVI_DB1B_VERTHORI = 3,
- HDMI_INFOFRAME_AVI_DB1S_0 = 0,
- HDMI_INFOFRAME_AVI_DB1S_1 = 1,
- HDMI_INFOFRAME_AVI_DB1S_2 = 2,
- HDMI_INFOFRAME_AVI_DB2C_NO = 0,
- HDMI_INFOFRAME_AVI_DB2C_ITU601 = 1,
- HDMI_INFOFRAME_AVI_DB2C_ITU709 = 2,
- HDMI_INFOFRAME_AVI_DB2C_EC_EXTENDED = 3,
- HDMI_INFOFRAME_AVI_DB2M_NO = 0,
- HDMI_INFOFRAME_AVI_DB2M_43 = 1,
- HDMI_INFOFRAME_AVI_DB2M_169 = 2,
- HDMI_INFOFRAME_AVI_DB2R_SAME = 8,
- HDMI_INFOFRAME_AVI_DB2R_43 = 9,
- HDMI_INFOFRAME_AVI_DB2R_169 = 10,
- HDMI_INFOFRAME_AVI_DB2R_149 = 11,
- HDMI_INFOFRAME_AVI_DB3ITC_NO = 0,
- HDMI_INFOFRAME_AVI_DB3ITC_YES = 1,
- HDMI_INFOFRAME_AVI_DB3EC_XVYUV601 = 0,
- HDMI_INFOFRAME_AVI_DB3EC_XVYUV709 = 1,
- HDMI_INFOFRAME_AVI_DB3Q_DEFAULT = 0,
- HDMI_INFOFRAME_AVI_DB3Q_LR = 1,
- HDMI_INFOFRAME_AVI_DB3Q_FR = 2,
- HDMI_INFOFRAME_AVI_DB3SC_NO = 0,
- HDMI_INFOFRAME_AVI_DB3SC_HORI = 1,
- HDMI_INFOFRAME_AVI_DB3SC_VERT = 2,
- HDMI_INFOFRAME_AVI_DB3SC_HORIVERT = 3,
- HDMI_INFOFRAME_AVI_DB5PR_NO = 0,
- HDMI_INFOFRAME_AVI_DB5PR_2 = 1,
- HDMI_INFOFRAME_AVI_DB5PR_3 = 2,
- HDMI_INFOFRAME_AVI_DB5PR_4 = 3,
- HDMI_INFOFRAME_AVI_DB5PR_5 = 4,
- HDMI_INFOFRAME_AVI_DB5PR_6 = 5,
- HDMI_INFOFRAME_AVI_DB5PR_7 = 6,
- HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
- HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
- HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
-};
-
-struct hdmi_cm {
- int code;
- int mode;
-};
-
struct hdmi_video_format {
enum hdmi_packing_mode packing_mode;
u32 y_res; /* Line per panel */
@@ -239,7 +187,8 @@ struct hdmi_video_format {
struct hdmi_config {
struct omap_video_timings timings;
- struct hdmi_cm cm;
+ struct hdmi_avi_infoframe infoframe;
+ enum hdmi_core_hdmi_dvi hdmi_dvi_mode;
};
/* HDMI PLL structure */
@@ -260,7 +209,7 @@ struct hdmi_audio_format {
enum hdmi_audio_justify justification;
enum hdmi_audio_sample_order sample_order;
enum hdmi_audio_samples_perword samples_per_word;
- enum hdmi_audio_sample_size sample_size;
+ enum hdmi_audio_sample_size_omap sample_size;
enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end;
};
@@ -298,47 +247,6 @@ struct hdmi_core_audio_config {
bool en_spdif;
};
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_avi {
- /* Y0, Y1 rgb,yCbCr */
- u8 db1_format;
- /* A0 Active information Present */
- u8 db1_active_info;
- /* B0, B1 Bar info data valid */
- u8 db1_bar_info_dv;
- /* S0, S1 scan information */
- u8 db1_scan_info;
- /* C0, C1 colorimetry */
- u8 db2_colorimetry;
- /* M0, M1 Aspect ratio (4:3, 16:9) */
- u8 db2_aspect_ratio;
- /* R0...R3 Active format aspect ratio */
- u8 db2_active_fmt_ar;
- /* ITC IT content. */
- u8 db3_itc;
- /* EC0, EC1, EC2 Extended colorimetry */
- u8 db3_ec;
- /* Q1, Q0 Quantization range */
- u8 db3_q_range;
- /* SC1, SC0 Non-uniform picture scaling */
- u8 db3_nup_scaling;
- /* VIC0..6 Video format identification */
- u8 db4_videocode;
- /* PR0..PR3 Pixel repetition factor */
- u8 db5_pixel_repeat;
- /* Line number end of top bar */
- u16 db6_7_line_eoftop;
- /* Line number start of bottom bar */
- u16 db8_9_line_sofbottom;
- /* Pixel number end of left bar */
- u16 db10_11_pixel_eofleft;
- /* Pixel number start of right bar */
- u16 db12_13_pixel_sofright;
-};
-
struct hdmi_wp_data {
void __iomem *base;
};
@@ -358,8 +266,6 @@ struct hdmi_phy_data {
struct hdmi_core_data {
void __iomem *base;
-
- struct hdmi_core_infoframe_avi avi_cfg;
};
static inline void hdmi_write_reg(void __iomem *base_addr, const u32 idx,
@@ -425,9 +331,6 @@ int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy);
int hdmi_phy_parse_lanes(struct hdmi_phy_data *phy, const u32 *lanes);
/* HDMI common funcs */
-const struct hdmi_config *hdmi_default_timing(void);
-const struct hdmi_config *hdmi_get_timings(int mode, int code);
-struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing);
int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
struct hdmi_phy_data *phy);
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4.c b/drivers/video/fbdev/omap2/dss/hdmi4.c
index 626aad2bef46..6a8550cf43e5 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4.c
@@ -281,29 +281,11 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct hdmi_cm cm;
- const struct hdmi_config *t;
-
mutex_lock(&hdmi.lock);
- cm = hdmi_get_code(timings);
- hdmi.cfg.cm = cm;
-
- t = hdmi_get_timings(cm.mode, cm.code);
- if (t != NULL) {
- hdmi.cfg = *t;
-
- dispc_set_tv_pclk(t->timings.pixelclock);
- } else {
- hdmi.cfg.timings = *timings;
- hdmi.cfg.cm.code = 0;
- hdmi.cfg.cm.mode = HDMI_DVI;
-
- dispc_set_tv_pclk(timings->pixelclock);
- }
+ hdmi.cfg.timings = *timings;
- DSSDBG("using mode: %s, code %d\n", hdmi.cfg.cm.mode == HDMI_DVI ?
- "DVI" : "HDMI", hdmi.cfg.cm.code);
+ dispc_set_tv_pclk(timings->pixelclock);
mutex_unlock(&hdmi.lock);
}
@@ -311,14 +293,7 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- const struct hdmi_config *cfg;
- struct hdmi_cm cm = hdmi.cfg.cm;
-
- cfg = hdmi_get_timings(cm.mode, cm.code);
- if (cfg == NULL)
- cfg = hdmi_default_timing();
-
- memcpy(timings, &cfg->timings, sizeof(cfg->timings));
+ *timings = hdmi.cfg.timings;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -516,7 +491,7 @@ static int hdmi_audio_enable(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
- if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+ if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
r = -EPERM;
goto err;
}
@@ -554,7 +529,7 @@ static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
- r = hdmi_mode_has_audio(hdmi.cfg.cm.mode);
+ r = hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode);
mutex_unlock(&hdmi.lock);
return r;
@@ -568,7 +543,7 @@ static int hdmi_audio_config(struct omap_dss_device *dssdev,
mutex_lock(&hdmi.lock);
- if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+ if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
r = -EPERM;
goto err;
}
@@ -615,6 +590,20 @@ static int hdmi_audio_config(struct omap_dss_device *dssdev,
}
#endif
+static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
+ const struct hdmi_avi_infoframe *avi)
+{
+ hdmi.cfg.infoframe = *avi;
+ return 0;
+}
+
+static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
+ bool hdmi_mode)
+{
+ hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
+ return 0;
+}
+
static const struct omapdss_hdmi_ops hdmi_ops = {
.connect = hdmi_connect,
.disconnect = hdmi_disconnect,
@@ -627,6 +616,8 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.get_timings = hdmi_display_get_timings,
.read_edid = hdmi_read_edid,
+ .set_infoframe = hdmi_set_infoframe,
+ .set_hdmi_mode = hdmi_set_hdmi_mode,
.audio_enable = hdmi_audio_enable,
.audio_disable = hdmi_audio_disable,
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/dss/hdmi4_core.c
index 8bde7b7e95ff..4ad39cfce254 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.c
@@ -197,9 +197,7 @@ int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
return l;
}
-static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
- struct hdmi_core_infoframe_avi *avi_cfg,
- struct hdmi_core_packet_enable_repeat *repeat_cfg)
+static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
{
DSSDBG("Enter hdmi_core_init\n");
@@ -210,35 +208,6 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
video_cfg->pkt_mode = HDMI_PACKETMODERESERVEDVALUE;
video_cfg->hdmi_dvi = HDMI_DVI;
video_cfg->tclk_sel_clkmult = HDMI_FPLL10IDCK;
-
- /* info frame */
- avi_cfg->db1_format = 0;
- avi_cfg->db1_active_info = 0;
- avi_cfg->db1_bar_info_dv = 0;
- avi_cfg->db1_scan_info = 0;
- avi_cfg->db2_colorimetry = 0;
- avi_cfg->db2_aspect_ratio = 0;
- avi_cfg->db2_active_fmt_ar = 0;
- avi_cfg->db3_itc = 0;
- avi_cfg->db3_ec = 0;
- avi_cfg->db3_q_range = 0;
- avi_cfg->db3_nup_scaling = 0;
- avi_cfg->db4_videocode = 0;
- avi_cfg->db5_pixel_repeat = 0;
- avi_cfg->db6_7_line_eoftop = 0;
- avi_cfg->db8_9_line_sofbottom = 0;
- avi_cfg->db10_11_pixel_eofleft = 0;
- avi_cfg->db12_13_pixel_sofright = 0;
-
- /* packet enable and repeat */
- repeat_cfg->audio_pkt = 0;
- repeat_cfg->audio_pkt_repeat = 0;
- repeat_cfg->avi_infoframe = 0;
- repeat_cfg->avi_infoframe_repeat = 0;
- repeat_cfg->gen_cntrl_pkt = 0;
- repeat_cfg->gen_cntrl_pkt_repeat = 0;
- repeat_cfg->generic_pkt = 0;
- repeat_cfg->generic_pkt_repeat = 0;
}
static void hdmi_core_powerdown_disable(struct hdmi_core_data *core)
@@ -303,80 +272,22 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
HDMI_CORE_SYS_TMDS_CTRL, cfg->tclk_sel_clkmult, 6, 5);
}
-static void hdmi_core_aux_infoframe_avi_config(struct hdmi_core_data *core)
+static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
+ struct hdmi_avi_infoframe *frame)
{
- u32 val;
- char sum = 0, checksum = 0;
void __iomem *av_base = hdmi_av_base(core);
- struct hdmi_core_infoframe_avi info_avi = core->avi_cfg;
-
- sum += 0x82 + 0x002 + 0x00D;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_TYPE, 0x082);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_VERS, 0x002);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_LEN, 0x00D);
-
- val = (info_avi.db1_format << 5) |
- (info_avi.db1_active_info << 4) |
- (info_avi.db1_bar_info_dv << 2) |
- (info_avi.db1_scan_info);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(0), val);
- sum += val;
-
- val = (info_avi.db2_colorimetry << 6) |
- (info_avi.db2_aspect_ratio << 4) |
- (info_avi.db2_active_fmt_ar);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(1), val);
- sum += val;
-
- val = (info_avi.db3_itc << 7) |
- (info_avi.db3_ec << 4) |
- (info_avi.db3_q_range << 2) |
- (info_avi.db3_nup_scaling);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(2), val);
- sum += val;
-
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(3),
- info_avi.db4_videocode);
- sum += info_avi.db4_videocode;
-
- val = info_avi.db5_pixel_repeat;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(4), val);
- sum += val;
-
- val = info_avi.db6_7_line_eoftop & 0x00FF;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(5), val);
- sum += val;
-
- val = ((info_avi.db6_7_line_eoftop >> 8) & 0x00FF);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(6), val);
- sum += val;
-
- val = info_avi.db8_9_line_sofbottom & 0x00FF;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(7), val);
- sum += val;
-
- val = ((info_avi.db8_9_line_sofbottom >> 8) & 0x00FF);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(8), val);
- sum += val;
-
- val = info_avi.db10_11_pixel_eofleft & 0x00FF;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(9), val);
- sum += val;
-
- val = ((info_avi.db10_11_pixel_eofleft >> 8) & 0x00FF);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(10), val);
- sum += val;
-
- val = info_avi.db12_13_pixel_sofright & 0x00FF;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(11), val);
- sum += val;
-
- val = ((info_avi.db12_13_pixel_sofright >> 8) & 0x00FF);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_DBYTE(12), val);
- sum += val;
+ u8 data[HDMI_INFOFRAME_SIZE(AVI)];
+ int i;
- checksum = 0x100 - sum;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_CHSUM, checksum);
+ hdmi_avi_infoframe_pack(frame, data, sizeof(data));
+
+ print_hex_dump_debug("AVI: ", DUMP_PREFIX_NONE, 16, 1, data,
+ HDMI_INFOFRAME_SIZE(AVI), false);
+
+ for (i = 0; i < sizeof(data); ++i) {
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AVI_BASE + i * 4,
+ data[i]);
+ }
}
static void hdmi_core_av_packet_config(struct hdmi_core_data *core,
@@ -404,11 +315,10 @@ void hdmi4_configure(struct hdmi_core_data *core,
struct omap_video_timings video_timing;
struct hdmi_video_format video_format;
/* HDMI core */
- struct hdmi_core_infoframe_avi *avi_cfg = &core->avi_cfg;
struct hdmi_core_video_config v_core_cfg;
- struct hdmi_core_packet_enable_repeat repeat_cfg;
+ struct hdmi_core_packet_enable_repeat repeat_cfg = { 0 };
- hdmi_core_init(&v_core_cfg, avi_cfg, &repeat_cfg);
+ hdmi_core_init(&v_core_cfg);
hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
@@ -431,44 +341,24 @@ void hdmi4_configure(struct hdmi_core_data *core,
hdmi_core_powerdown_disable(core);
v_core_cfg.pkt_mode = HDMI_PACKETMODE24BITPERPIXEL;
- v_core_cfg.hdmi_dvi = cfg->cm.mode;
+ v_core_cfg.hdmi_dvi = cfg->hdmi_dvi_mode;
hdmi_core_video_config(core, &v_core_cfg);
/* release software reset in the core */
hdmi_core_swreset_release(core);
- /*
- * configure packet
- * info frame video see doc CEA861-D page 65
- */
- avi_cfg->db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
- avi_cfg->db1_active_info =
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
- avi_cfg->db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
- avi_cfg->db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
- avi_cfg->db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
- avi_cfg->db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
- avi_cfg->db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
- avi_cfg->db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
- avi_cfg->db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
- avi_cfg->db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
- avi_cfg->db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
- avi_cfg->db4_videocode = cfg->cm.code;
- avi_cfg->db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
- avi_cfg->db6_7_line_eoftop = 0;
- avi_cfg->db8_9_line_sofbottom = 0;
- avi_cfg->db10_11_pixel_eofleft = 0;
- avi_cfg->db12_13_pixel_sofright = 0;
-
- hdmi_core_aux_infoframe_avi_config(core);
+ if (cfg->hdmi_dvi_mode == HDMI_HDMI) {
+ hdmi_core_write_avi_infoframe(core, &cfg->infoframe);
+
+ /* enable/repeat the infoframe */
+ repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
+ repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
+ /* wakeup */
+ repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
+ repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
+ }
- /* enable/repeat the infoframe */
- repeat_cfg.avi_infoframe = HDMI_PACKETENABLE;
- repeat_cfg.avi_infoframe_repeat = HDMI_PACKETREPEATON;
- /* wakeup */
- repeat_cfg.audio_pkt = HDMI_PACKETENABLE;
- repeat_cfg.audio_pkt_repeat = HDMI_PACKETREPEATON;
hdmi_core_av_packet_config(core, repeat_cfg);
}
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4_core.h b/drivers/video/fbdev/omap2/dss/hdmi4_core.h
index bb646896fa82..827909eb6c50 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4_core.h
+++ b/drivers/video/fbdev/omap2/dss/hdmi4_core.h
@@ -145,6 +145,7 @@
#define HDMI_CORE_AV_DPD 0xF4
#define HDMI_CORE_AV_PB_CTRL1 0xF8
#define HDMI_CORE_AV_PB_CTRL2 0xFC
+#define HDMI_CORE_AV_AVI_BASE 0x100
#define HDMI_CORE_AV_AVI_TYPE 0x100
#define HDMI_CORE_AV_AVI_VERS 0x104
#define HDMI_CORE_AV_AVI_LEN 0x108
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5.c b/drivers/video/fbdev/omap2/dss/hdmi5.c
index c468b9e1f295..32d02ec34d23 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5.c
@@ -299,29 +299,11 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- struct hdmi_cm cm;
- const struct hdmi_config *t;
-
mutex_lock(&hdmi.lock);
- cm = hdmi_get_code(timings);
- hdmi.cfg.cm = cm;
-
- t = hdmi_get_timings(cm.mode, cm.code);
- if (t != NULL) {
- hdmi.cfg = *t;
-
- dispc_set_tv_pclk(t->timings.pixelclock);
- } else {
- hdmi.cfg.timings = *timings;
- hdmi.cfg.cm.code = 0;
- hdmi.cfg.cm.mode = HDMI_DVI;
-
- dispc_set_tv_pclk(timings->pixelclock);
- }
+ hdmi.cfg.timings = *timings;
- DSSDBG("using mode: %s, code %d\n", hdmi.cfg.cm.mode == HDMI_DVI ?
- "DVI" : "HDMI", hdmi.cfg.cm.code);
+ dispc_set_tv_pclk(timings->pixelclock);
mutex_unlock(&hdmi.lock);
}
@@ -329,14 +311,7 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- const struct hdmi_config *cfg;
- struct hdmi_cm cm = hdmi.cfg.cm;
-
- cfg = hdmi_get_timings(cm.mode, cm.code);
- if (cfg == NULL)
- cfg = hdmi_default_timing();
-
- memcpy(timings, &cfg->timings, sizeof(cfg->timings));
+ *timings = hdmi.cfg.timings;
}
static void hdmi_dump_regs(struct seq_file *s)
@@ -541,7 +516,7 @@ static int hdmi_audio_enable(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
- if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+ if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
r = -EPERM;
goto err;
}
@@ -579,7 +554,7 @@ static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
mutex_lock(&hdmi.lock);
- r = hdmi_mode_has_audio(hdmi.cfg.cm.mode);
+ r = hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode);
mutex_unlock(&hdmi.lock);
return r;
@@ -593,7 +568,7 @@ static int hdmi_audio_config(struct omap_dss_device *dssdev,
mutex_lock(&hdmi.lock);
- if (!hdmi_mode_has_audio(hdmi.cfg.cm.mode)) {
+ if (!hdmi_mode_has_audio(hdmi.cfg.hdmi_dvi_mode)) {
r = -EPERM;
goto err;
}
@@ -640,6 +615,20 @@ static int hdmi_audio_config(struct omap_dss_device *dssdev,
}
#endif
+static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
+ const struct hdmi_avi_infoframe *avi)
+{
+ hdmi.cfg.infoframe = *avi;
+ return 0;
+}
+
+static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
+ bool hdmi_mode)
+{
+ hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
+ return 0;
+}
+
static const struct omapdss_hdmi_ops hdmi_ops = {
.connect = hdmi_connect,
.disconnect = hdmi_disconnect,
@@ -652,6 +641,8 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.get_timings = hdmi_display_get_timings,
.read_edid = hdmi_read_edid,
+ .set_infoframe = hdmi_set_infoframe,
+ .set_hdmi_mode = hdmi_set_hdmi_mode,
.audio_enable = hdmi_audio_enable,
.audio_disable = hdmi_audio_disable,
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/dss/hdmi5_core.c
index 7528c7a42aa5..83acbf7a8c89 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5_core.c
@@ -290,7 +290,6 @@ void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s)
}
static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
- struct hdmi_core_infoframe_avi *avi_cfg,
struct hdmi_config *cfg)
{
DSSDBG("hdmi_core_init\n");
@@ -312,27 +311,8 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
video_cfg->vblank_osc = 0; /* Always 0 - need to confirm */
video_cfg->vblank = cfg->timings.vsw +
cfg->timings.vfp + cfg->timings.vbp;
- video_cfg->v_fc_config.cm.mode = cfg->cm.mode;
+ video_cfg->v_fc_config.hdmi_dvi_mode = cfg->hdmi_dvi_mode;
video_cfg->v_fc_config.timings.interlace = cfg->timings.interlace;
-
- /* info frame */
- avi_cfg->db1_format = 0;
- avi_cfg->db1_active_info = 0;
- avi_cfg->db1_bar_info_dv = 0;
- avi_cfg->db1_scan_info = 0;
- avi_cfg->db2_colorimetry = 0;
- avi_cfg->db2_aspect_ratio = 0;
- avi_cfg->db2_active_fmt_ar = 0;
- avi_cfg->db3_itc = 0;
- avi_cfg->db3_ec = 0;
- avi_cfg->db3_q_range = 0;
- avi_cfg->db3_nup_scaling = 0;
- avi_cfg->db4_videocode = 0;
- avi_cfg->db5_pixel_repeat = 0;
- avi_cfg->db6_7_line_eoftop = 0;
- avi_cfg->db8_9_line_sofbottom = 0;
- avi_cfg->db10_11_pixel_eofleft = 0;
- avi_cfg->db12_13_pixel_sofright = 0;
}
/* DSS_HDMI_CORE_VIDEO_CONFIG */
@@ -398,7 +378,7 @@ static void hdmi_core_video_config(struct hdmi_core_data *core,
/* select DVI mode */
REG_FLD_MOD(base, HDMI_CORE_FC_INVIDCONF,
- cfg->v_fc_config.cm.mode, 3, 3);
+ cfg->v_fc_config.hdmi_dvi_mode, 3, 3);
}
static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core)
@@ -438,24 +418,60 @@ static void hdmi_core_config_video_sampler(struct hdmi_core_data *core)
REG_FLD_MOD(core->base, HDMI_CORE_TX_INVID0, video_mapping, 4, 0);
}
-static void hdmi_core_aux_infoframe_avi_config(struct hdmi_core_data *core)
+static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
+ struct hdmi_avi_infoframe *frame)
{
void __iomem *base = core->base;
- struct hdmi_core_infoframe_avi avi = core->avi_cfg;
-
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF0, avi.db1_format, 1, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF0, avi.db1_active_info, 6, 6);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF0, avi.db1_bar_info_dv, 3, 2);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF0, avi.db1_scan_info, 5, 4);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF1, avi.db2_colorimetry, 7, 6);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF1, avi.db2_aspect_ratio, 5, 4);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF1, avi.db2_active_fmt_ar, 3, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF2, avi.db3_itc, 7, 7);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF2, avi.db3_ec, 6, 4);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF2, avi.db3_q_range, 3, 2);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVICONF2, avi.db3_nup_scaling, 1, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_AVIVID, avi.db4_videocode, 6, 0);
- REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, avi.db5_pixel_repeat, 3, 0);
+ u8 data[HDMI_INFOFRAME_SIZE(AVI)];
+ u8 *ptr;
+ unsigned y, a, b, s;
+ unsigned c, m, r;
+ unsigned itc, ec, q, sc;
+ unsigned vic;
+ unsigned yq, cn, pr;
+
+ hdmi_avi_infoframe_pack(frame, data, sizeof(data));
+
+ print_hex_dump_debug("AVI: ", DUMP_PREFIX_NONE, 16, 1, data,
+ HDMI_INFOFRAME_SIZE(AVI), false);
+
+ ptr = data + HDMI_INFOFRAME_HEADER_SIZE;
+
+ y = (ptr[0] >> 5) & 0x3;
+ a = (ptr[0] >> 4) & 0x1;
+ b = (ptr[0] >> 2) & 0x3;
+ s = (ptr[0] >> 0) & 0x3;
+
+ c = (ptr[1] >> 6) & 0x3;
+ m = (ptr[1] >> 4) & 0x3;
+ r = (ptr[1] >> 0) & 0x3;
+
+ itc = (ptr[2] >> 7) & 0x1;
+ ec = (ptr[2] >> 4) & 0x7;
+ q = (ptr[2] >> 2) & 0x3;
+ sc = (ptr[2] >> 0) & 0x3;
+
+ vic = ptr[3];
+
+ yq = (ptr[4] >> 6) & 0x3;
+ cn = (ptr[4] >> 4) & 0x3;
+ pr = (ptr[4] >> 0) & 0xf;
+
+ hdmi_write_reg(base, HDMI_CORE_FC_AVICONF0,
+ (a << 6) | (s << 4) | (b << 2) | (y << 0));
+
+ hdmi_write_reg(base, HDMI_CORE_FC_AVICONF1,
+ (c << 6) | (m << 4) | (r << 0));
+
+ hdmi_write_reg(base, HDMI_CORE_FC_AVICONF2,
+ (itc << 7) | (ec << 4) | (q << 2) | (sc << 0));
+
+ hdmi_write_reg(base, HDMI_CORE_FC_AVIVID, vic);
+
+ hdmi_write_reg(base, HDMI_CORE_FC_AVICONF3,
+ (yq << 2) | (cn << 0));
+
+ REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0);
}
static void hdmi_core_csc_config(struct hdmi_core_data *core,
@@ -497,10 +513,8 @@ static void hdmi_core_configure_range(struct hdmi_core_data *core)
/* support limited range with 24 bit color depth for now */
csc_coeff = csc_table_deepcolor[0];
- core->avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_LR;
hdmi_core_csc_config(core, csc_coeff);
- hdmi_core_aux_infoframe_avi_config(core);
}
static void hdmi_core_enable_video_path(struct hdmi_core_data *core)
@@ -591,11 +605,10 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct omap_video_timings video_timing;
struct hdmi_video_format video_format;
struct hdmi_core_vid_config v_core_cfg;
- struct hdmi_core_infoframe_avi *avi_cfg = &core->avi_cfg;
hdmi_core_mask_interrupts(core);
- hdmi_core_init(&v_core_cfg, avi_cfg, cfg);
+ hdmi_core_init(&v_core_cfg, cfg);
hdmi_wp_init_vid_fmt_timings(&video_format, &video_timing, cfg);
@@ -608,7 +621,9 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_wp_video_config_interface(wp, &video_timing);
+ /* support limited range with 24 bit color depth for now */
hdmi_core_configure_range(core);
+ cfg->infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
/*
* configure core video part, set software reset in the core
@@ -621,29 +636,8 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
hdmi_core_config_csc(core);
hdmi_core_config_video_sampler(core);
- /*
- * configure packet info frame video see doc CEA861-D page 65
- */
- avi_cfg->db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
- avi_cfg->db1_active_info =
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
- avi_cfg->db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
- avi_cfg->db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
- avi_cfg->db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
- avi_cfg->db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
- avi_cfg->db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
- avi_cfg->db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
- avi_cfg->db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
- avi_cfg->db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
- avi_cfg->db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
- avi_cfg->db4_videocode = cfg->cm.code;
- avi_cfg->db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
- avi_cfg->db6_7_line_eoftop = 0;
- avi_cfg->db8_9_line_sofbottom = 0;
- avi_cfg->db10_11_pixel_eofleft = 0;
- avi_cfg->db12_13_pixel_sofright = 0;
-
- hdmi_core_aux_infoframe_avi_config(core);
+ if (cfg->hdmi_dvi_mode == HDMI_HDMI)
+ hdmi_core_write_avi_infoframe(core, &cfg->infoframe);
hdmi_core_enable_video_path(core);
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_common.c b/drivers/video/fbdev/omap2/dss/hdmi_common.c
index 9a2c39cf297f..7d5f1039de9f 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_common.c
@@ -1,18 +1,4 @@
-/*
- * Logic for the below structure :
- * user enters the CEA or VESA timings by specifying the HDMI/DVI code.
- * There is a correspondence between CEA/VESA timing and code, please
- * refer to section 6.3 in HDMI 1.3 specification for timing code.
- *
- * In the below structure, cea_vesa_timings corresponds to all OMAP4
- * supported CEA and VESA timing values.code_cea corresponds to the CEA
- * code, It is used to get the timing from cea_vesa_timing array.Similarly
- * with code_vesa. Code_index is used for back mapping, that is once EDID
- * is read from the TV, EDID is parsed to find the timing values and then
- * map it to corresponding CEA or VESA index.
- */
-
#define DSS_SUBSYS_NAME "HDMI"
#include <linux/kernel.h>
@@ -22,308 +8,6 @@
#include "hdmi.h"
-static const struct hdmi_config cea_timings[] = {
- {
- { 640, 480, 25200000, 96, 16, 48, 2, 10, 33,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 1, HDMI_HDMI },
- },
- {
- { 720, 480, 27027000, 62, 16, 60, 6, 9, 30,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 2, HDMI_HDMI },
- },
- {
- { 1280, 720, 74250000, 40, 110, 220, 5, 5, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 4, HDMI_HDMI },
- },
- {
- { 1920, 540, 74250000, 44, 88, 148, 5, 2, 15,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- true, },
- { 5, HDMI_HDMI },
- },
- {
- { 1440, 240, 27027000, 124, 38, 114, 3, 4, 15,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- true, },
- { 6, HDMI_HDMI },
- },
- {
- { 1920, 1080, 148500000, 44, 88, 148, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 16, HDMI_HDMI },
- },
- {
- { 720, 576, 27000000, 64, 12, 68, 5, 5, 39,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 17, HDMI_HDMI },
- },
- {
- { 1280, 720, 74250000, 40, 440, 220, 5, 5, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 19, HDMI_HDMI },
- },
- {
- { 1920, 540, 74250000, 44, 528, 148, 5, 2, 15,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- true, },
- { 20, HDMI_HDMI },
- },
- {
- { 1440, 288, 27000000, 126, 24, 138, 3, 2, 19,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- true, },
- { 21, HDMI_HDMI },
- },
- {
- { 1440, 576, 54000000, 128, 24, 136, 5, 5, 39,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 29, HDMI_HDMI },
- },
- {
- { 1920, 1080, 148500000, 44, 528, 148, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 31, HDMI_HDMI },
- },
- {
- { 1920, 1080, 74250000, 44, 638, 148, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 32, HDMI_HDMI },
- },
- {
- { 2880, 480, 108108000, 248, 64, 240, 6, 9, 30,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 35, HDMI_HDMI },
- },
- {
- { 2880, 576, 108000000, 256, 48, 272, 5, 5, 39,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 37, HDMI_HDMI },
- },
-};
-
-static const struct hdmi_config vesa_timings[] = {
-/* VESA From Here */
- {
- { 640, 480, 25175000, 96, 16, 48, 2, 11, 31,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 4, HDMI_DVI },
- },
- {
- { 800, 600, 40000000, 128, 40, 88, 4, 1, 23,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 9, HDMI_DVI },
- },
- {
- { 848, 480, 33750000, 112, 16, 112, 8, 6, 23,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0xE, HDMI_DVI },
- },
- {
- { 1280, 768, 79500000, 128, 64, 192, 7, 3, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x17, HDMI_DVI },
- },
- {
- { 1280, 800, 83500000, 128, 72, 200, 6, 3, 22,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x1C, HDMI_DVI },
- },
- {
- { 1360, 768, 85500000, 112, 64, 256, 6, 3, 18,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x27, HDMI_DVI },
- },
- {
- { 1280, 960, 108000000, 112, 96, 312, 3, 1, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x20, HDMI_DVI },
- },
- {
- { 1280, 1024, 108000000, 112, 48, 248, 3, 1, 38,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x23, HDMI_DVI },
- },
- {
- { 1024, 768, 65000000, 136, 24, 160, 6, 3, 29,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x10, HDMI_DVI },
- },
- {
- { 1400, 1050, 121750000, 144, 88, 232, 4, 3, 32,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x2A, HDMI_DVI },
- },
- {
- { 1440, 900, 106500000, 152, 80, 232, 6, 3, 25,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x2F, HDMI_DVI },
- },
- {
- { 1680, 1050, 146250000, 176 , 104, 280, 6, 3, 30,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW,
- false, },
- { 0x3A, HDMI_DVI },
- },
- {
- { 1366, 768, 85500000, 143, 70, 213, 3, 3, 24,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x51, HDMI_DVI },
- },
- {
- { 1920, 1080, 148500000, 44, 148, 80, 5, 4, 36,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x52, HDMI_DVI },
- },
- {
- { 1280, 768, 68250000, 32, 48, 80, 7, 3, 12,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x16, HDMI_DVI },
- },
- {
- { 1400, 1050, 101000000, 32, 48, 80, 4, 3, 23,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x29, HDMI_DVI },
- },
- {
- { 1680, 1050, 119000000, 32, 48, 80, 6, 3, 21,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x39, HDMI_DVI },
- },
- {
- { 1280, 800, 79500000, 32, 48, 80, 6, 3, 14,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x1B, HDMI_DVI },
- },
- {
- { 1280, 720, 74250000, 40, 110, 220, 5, 5, 20,
- OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x55, HDMI_DVI },
- },
- {
- { 1920, 1200, 154000000, 32, 48, 80, 6, 3, 26,
- OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH,
- false, },
- { 0x44, HDMI_DVI },
- },
-};
-
-const struct hdmi_config *hdmi_default_timing(void)
-{
- return &vesa_timings[0];
-}
-
-static const struct hdmi_config *hdmi_find_timing(int code,
- const struct hdmi_config *timings_arr, int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- if (timings_arr[i].cm.code == code)
- return &timings_arr[i];
- }
-
- return NULL;
-}
-
-const struct hdmi_config *hdmi_get_timings(int mode, int code)
-{
- const struct hdmi_config *arr;
- int len;
-
- if (mode == HDMI_DVI) {
- arr = vesa_timings;
- len = ARRAY_SIZE(vesa_timings);
- } else {
- arr = cea_timings;
- len = ARRAY_SIZE(cea_timings);
- }
-
- return hdmi_find_timing(code, arr, len);
-}
-
-static bool hdmi_timings_compare(struct omap_video_timings *timing1,
- const struct omap_video_timings *timing2)
-{
- int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync;
-
- if ((DIV_ROUND_CLOSEST(timing2->pixelclock, 1000000) ==
- DIV_ROUND_CLOSEST(timing1->pixelclock, 1000000)) &&
- (timing2->x_res == timing1->x_res) &&
- (timing2->y_res == timing1->y_res)) {
-
- timing2_hsync = timing2->hfp + timing2->hsw + timing2->hbp;
- timing1_hsync = timing1->hfp + timing1->hsw + timing1->hbp;
- timing2_vsync = timing2->vfp + timing2->vsw + timing2->vbp;
- timing1_vsync = timing1->vfp + timing1->vsw + timing1->vbp;
-
- DSSDBG("timing1_hsync = %d timing1_vsync = %d"\
- "timing2_hsync = %d timing2_vsync = %d\n",
- timing1_hsync, timing1_vsync,
- timing2_hsync, timing2_vsync);
-
- if ((timing1_hsync == timing2_hsync) &&
- (timing1_vsync == timing2_vsync)) {
- return true;
- }
- }
- return false;
-}
-
-struct hdmi_cm hdmi_get_code(struct omap_video_timings *timing)
-{
- int i;
- struct hdmi_cm cm = {-1};
- DSSDBG("hdmi_get_code\n");
-
- for (i = 0; i < ARRAY_SIZE(cea_timings); i++) {
- if (hdmi_timings_compare(timing, &cea_timings[i].timings)) {
- cm = cea_timings[i].cm;
- goto end;
- }
- }
- for (i = 0; i < ARRAY_SIZE(vesa_timings); i++) {
- if (hdmi_timings_compare(timing, &vesa_timings[i].timings)) {
- cm = vesa_timings[i].cm;
- goto end;
- }
- }
-
-end:
- return cm;
-}
-
int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
struct hdmi_phy_data *phy)
{
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index 8a8d7f060784..be73727c7227 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -326,13 +326,7 @@ static int riva_bl_update_status(struct backlight_device *bd)
return 0;
}
-static int riva_bl_get_brightness(struct backlight_device *bd)
-{
- return bd->props.brightness;
-}
-
static const struct backlight_ops riva_bl_ops = {
- .get_brightness = riva_bl_get_brightness,
.update_status = riva_bl_update_status,
};
diff --git a/drivers/video/fbdev/s3c-fb.c b/drivers/video/fbdev/s3c-fb.c
index 62acae2694a9..b33abb0a433d 100644
--- a/drivers/video/fbdev/s3c-fb.c
+++ b/drivers/video/fbdev/s3c-fb.c
@@ -1805,38 +1805,6 @@ static struct s3c_fb_driverdata s3c_fb_data_64xx = {
.win[4] = &s3c_fb_data_64xx_wins[4],
};
-static struct s3c_fb_driverdata s3c_fb_data_s5pc100 = {
- .variant = {
- .nr_windows = 5,
- .vidtcon = VIDTCON0,
- .wincon = WINCON(0),
- .winmap = WINxMAP(0),
- .keycon = WKEYCON,
- .osd = VIDOSD_BASE,
- .osd_stride = 16,
- .buf_start = VIDW_BUF_START(0),
- .buf_size = VIDW_BUF_SIZE(0),
- .buf_end = VIDW_BUF_END(0),
-
- .palette = {
- [0] = 0x2400,
- [1] = 0x2800,
- [2] = 0x2c00,
- [3] = 0x3000,
- [4] = 0x3400,
- },
-
- .has_prtcon = 1,
- .has_blendcon = 1,
- .has_clksel = 1,
- },
- .win[0] = &s3c_fb_data_s5p_wins[0],
- .win[1] = &s3c_fb_data_s5p_wins[1],
- .win[2] = &s3c_fb_data_s5p_wins[2],
- .win[3] = &s3c_fb_data_s5p_wins[3],
- .win[4] = &s3c_fb_data_s5p_wins[4],
-};
-
static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = {
.variant = {
.nr_windows = 5,
@@ -1970,41 +1938,11 @@ static struct s3c_fb_driverdata s3c_fb_data_s3c2443 = {
},
};
-static struct s3c_fb_driverdata s3c_fb_data_s5p64x0 = {
- .variant = {
- .nr_windows = 3,
- .vidtcon = VIDTCON0,
- .wincon = WINCON(0),
- .winmap = WINxMAP(0),
- .keycon = WKEYCON,
- .osd = VIDOSD_BASE,
- .osd_stride = 16,
- .buf_start = VIDW_BUF_START(0),
- .buf_size = VIDW_BUF_SIZE(0),
- .buf_end = VIDW_BUF_END(0),
-
- .palette = {
- [0] = 0x2400,
- [1] = 0x2800,
- [2] = 0x2c00,
- },
-
- .has_blendcon = 1,
- .has_fixvclk = 1,
- },
- .win[0] = &s3c_fb_data_s5p_wins[0],
- .win[1] = &s3c_fb_data_s5p_wins[1],
- .win[2] = &s3c_fb_data_s5p_wins[2],
-};
-
static struct platform_device_id s3c_fb_driver_ids[] = {
{
.name = "s3c-fb",
.driver_data = (unsigned long)&s3c_fb_data_64xx,
}, {
- .name = "s5pc100-fb",
- .driver_data = (unsigned long)&s3c_fb_data_s5pc100,
- }, {
.name = "s5pv210-fb",
.driver_data = (unsigned long)&s3c_fb_data_s5pv210,
}, {
@@ -2016,9 +1954,6 @@ static struct platform_device_id s3c_fb_driver_ids[] = {
}, {
.name = "s3c2443-fb",
.driver_data = (unsigned long)&s3c_fb_data_s3c2443,
- }, {
- .name = "s5p64x0-fb",
- .driver_data = (unsigned long)&s3c_fb_data_s5p64x0,
},
{},
};
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
index 81af5a63e9e1..43c63a4f3178 100644
--- a/drivers/video/fbdev/s3c2410fb.c
+++ b/drivers/video/fbdev/s3c2410fb.c
@@ -616,7 +616,7 @@ static int s3c2410fb_debug_store(struct device *dev,
return len;
}
-static DEVICE_ATTR(debug, 0666, s3c2410fb_debug_show, s3c2410fb_debug_store);
+static DEVICE_ATTR(debug, 0664, s3c2410fb_debug_show, s3c2410fb_debug_store);
static struct fb_ops s3c2410fb_ops = {
.owner = THIS_MODULE,
@@ -932,7 +932,7 @@ static int s3c24xxfb_probe(struct platform_device *pdev,
goto release_irq;
}
- clk_enable(info->clk);
+ clk_prepare_enable(info->clk);
dprintk("got and enabled clock\n");
usleep_range(1000, 1100);
@@ -996,7 +996,7 @@ static int s3c24xxfb_probe(struct platform_device *pdev,
free_video_memory:
s3c2410fb_unmap_video_memory(fbinfo);
release_clock:
- clk_disable(info->clk);
+ clk_disable_unprepare(info->clk);
clk_put(info->clk);
release_irq:
free_irq(irq, info);
@@ -1038,7 +1038,7 @@ static int s3c2410fb_remove(struct platform_device *pdev)
s3c2410fb_unmap_video_memory(fbinfo);
if (info->clk) {
- clk_disable(info->clk);
+ clk_disable_unprepare(info->clk);
clk_put(info->clk);
info->clk = NULL;
}
@@ -1070,7 +1070,7 @@ static int s3c2410fb_suspend(struct platform_device *dev, pm_message_t state)
* before the clock goes off again (bjd) */
usleep_range(1000, 1100);
- clk_disable(info->clk);
+ clk_disable_unprepare(info->clk);
return 0;
}
@@ -1080,7 +1080,7 @@ static int s3c2410fb_resume(struct platform_device *dev)
struct fb_info *fbinfo = platform_get_drvdata(dev);
struct s3c2410fb_info *info = fbinfo->par;
- clk_enable(info->clk);
+ clk_prepare_enable(info->clk);
usleep_range(1000, 1100);
s3c2410fb_init_registers(fbinfo);
diff --git a/drivers/video/fbdev/sis/init.c b/drivers/video/fbdev/sis/init.c
index bd40f5ecd901..dfe3eb769638 100644
--- a/drivers/video/fbdev/sis/init.c
+++ b/drivers/video/fbdev/sis/init.c
@@ -1511,7 +1511,7 @@ SiS_Get310DRAMType(struct SiS_Private *SiS_Pr)
} else if(SiS_Pr->ChipType >= SIS_340) {
/* TODO */
data = 0;
- } if(SiS_Pr->ChipType >= SIS_661) {
+ } else if(SiS_Pr->ChipType >= SIS_661) {
if(SiS_Pr->SiS_ROMNew) {
data = ((SiS_GetReg(SiS_Pr->SiS_P3d4,0x78) & 0xc0) >> 6);
} else {
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 22ad028bf123..3f12a2dd959a 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -1572,10 +1572,6 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
/* Adapt RGB settings */
sisfb_bpp_to_var(ivideo, var);
- /* Sanity check for offsets */
- if(var->xoffset < 0) var->xoffset = 0;
- if(var->yoffset < 0) var->yoffset = 0;
-
if(var->xres > var->xres_virtual)
var->xres_virtual = var->xres;
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 101db3faf5d4..3d1463c6b120 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -91,7 +91,7 @@ struct virtio_pci_vq_info
};
/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
-static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = {
+static const struct pci_device_id virtio_pci_id_table[] = {
{ PCI_DEVICE(0x1af4, PCI_ANY_ID) },
{ 0 }
};
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index bfb2d3f06738..18078ecbfcc6 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -1555,16 +1555,14 @@ static int ca91cx42_crcsr_init(struct vme_bridge *ca91cx42_bridge,
}
/* Allocate mem for CR/CSR image */
- bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
- &bridge->crcsr_bus);
+ bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
+ &bridge->crcsr_bus);
if (bridge->crcsr_kernel == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
"image\n");
return -ENOMEM;
}
- memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
-
crcsr_addr = slot * (512 * 1024);
iowrite32(bridge->crcsr_bus - crcsr_addr, bridge->base + VCSR_TO);
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 61e706c0e00c..e07cfa8001bb 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -2275,16 +2275,14 @@ static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
bridge = tsi148_bridge->driver_priv;
/* Allocate mem for CR/CSR image */
- bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
- &bridge->crcsr_bus);
+ bridge->crcsr_kernel = pci_zalloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
+ &bridge->crcsr_bus);
if (bridge->crcsr_kernel == NULL) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"CR/CSR image\n");
return -ENOMEM;
}
- memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
-
reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 76dd54122f76..f57312fced80 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1293,7 +1293,7 @@ config DIAG288_WATCHDOG
both.
To compile this driver as a module, choose M here. The module
- will be called vmwatchdog.
+ will be called diag288_wdt.
# SUPERH (sh + sh64) Architecture
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index ee4f86ba83ec..9f210299de24 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -296,9 +296,6 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
int ret;
struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -EINVAL;
-
dw_wdt.regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(dw_wdt.regs))
return PTR_ERR(dw_wdt.regs);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 9d4874f09948..68c3d379ffa8 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -28,6 +28,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/timer.h>
@@ -190,10 +191,12 @@ static struct regmap_config imx2_wdt_regmap_config = {
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct imx2_wdt_device *wdev;
struct watchdog_device *wdog;
struct resource *res;
void __iomem *base;
+ bool big_endian;
int ret;
u32 val;
@@ -201,6 +204,10 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (!wdev)
return -ENOMEM;
+ big_endian = of_property_read_bool(np, "big-endian");
+ if (big_endian)
+ imx2_wdt_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 3b3148c764a3..021e84eb88eb 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -192,11 +192,6 @@ ltq_wdt_probe(struct platform_device *pdev)
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct clk *clk;
- if (!res) {
- dev_err(&pdev->dev, "cannot obtain I/O memory region");
- return -ENOENT;
- }
-
ltq_wdt_membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ltq_wdt_membase))
return PTR_ERR(ltq_wdt_membase);
diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c
index 4baf2d788920..8453531545df 100644
--- a/drivers/watchdog/octeon-wdt-main.c
+++ b/drivers/watchdog/octeon-wdt-main.c
@@ -145,35 +145,39 @@ static void __init octeon_wdt_build_stage1(void)
uasm_i_mfc0(&p, K0, C0_STATUS);
#ifdef CONFIG_HOTPLUG_CPU
- uasm_il_bbit0(&p, &r, K0, ilog2(ST0_NMI), label_enter_bootloader);
+ if (octeon_bootloader_entry_addr)
+ uasm_il_bbit0(&p, &r, K0, ilog2(ST0_NMI),
+ label_enter_bootloader);
#endif
/* Force 64-bit addressing enabled */
uasm_i_ori(&p, K0, K0, ST0_UX | ST0_SX | ST0_KX);
uasm_i_mtc0(&p, K0, C0_STATUS);
#ifdef CONFIG_HOTPLUG_CPU
- uasm_i_mfc0(&p, K0, C0_EBASE);
- /* Coreid number in K0 */
- uasm_i_andi(&p, K0, K0, 0xf);
- /* 8 * coreid in bits 16-31 */
- uasm_i_dsll_safe(&p, K0, K0, 3 + 16);
- uasm_i_ori(&p, K0, K0, 0x8001);
- uasm_i_dsll_safe(&p, K0, K0, 16);
- uasm_i_ori(&p, K0, K0, 0x0700);
- uasm_i_drotr_safe(&p, K0, K0, 32);
- /*
- * Should result in: 0x8001,0700,0000,8*coreid which is
- * CVMX_CIU_WDOGX(coreid) - 0x0500
- *
- * Now ld K0, CVMX_CIU_WDOGX(coreid)
- */
- uasm_i_ld(&p, K0, 0x500, K0);
- /*
- * If bit one set handle the NMI as a watchdog event.
- * otherwise transfer control to bootloader.
- */
- uasm_il_bbit0(&p, &r, K0, 1, label_enter_bootloader);
- uasm_i_nop(&p);
+ if (octeon_bootloader_entry_addr) {
+ uasm_i_mfc0(&p, K0, C0_EBASE);
+ /* Coreid number in K0 */
+ uasm_i_andi(&p, K0, K0, 0xf);
+ /* 8 * coreid in bits 16-31 */
+ uasm_i_dsll_safe(&p, K0, K0, 3 + 16);
+ uasm_i_ori(&p, K0, K0, 0x8001);
+ uasm_i_dsll_safe(&p, K0, K0, 16);
+ uasm_i_ori(&p, K0, K0, 0x0700);
+ uasm_i_drotr_safe(&p, K0, K0, 32);
+ /*
+ * Should result in: 0x8001,0700,0000,8*coreid which is
+ * CVMX_CIU_WDOGX(coreid) - 0x0500
+ *
+ * Now ld K0, CVMX_CIU_WDOGX(coreid)
+ */
+ uasm_i_ld(&p, K0, 0x500, K0);
+ /*
+ * If bit one set handle the NMI as a watchdog event.
+ * otherwise transfer control to bootloader.
+ */
+ uasm_il_bbit0(&p, &r, K0, 1, label_enter_bootloader);
+ uasm_i_nop(&p);
+ }
#endif
/* Clear Dcache so cvmseg works right. */
@@ -194,11 +198,13 @@ static void __init octeon_wdt_build_stage1(void)
uasm_i_dmfc0(&p, K0, C0_DESAVE);
#ifdef CONFIG_HOTPLUG_CPU
- uasm_build_label(&l, p, label_enter_bootloader);
- /* Jump to the bootloader and restore K0 */
- UASM_i_LA(&p, K0, (long)octeon_bootloader_entry_addr);
- uasm_i_jr(&p, K0);
- uasm_i_dmfc0(&p, K0, C0_DESAVE);
+ if (octeon_bootloader_entry_addr) {
+ uasm_build_label(&l, p, label_enter_bootloader);
+ /* Jump to the bootloader and restore K0 */
+ UASM_i_LA(&p, K0, (long)octeon_bootloader_entry_addr);
+ uasm_i_jr(&p, K0);
+ uasm_i_dmfc0(&p, K0, C0_DESAVE);
+ }
#endif
uasm_resolve_relocs(relocs, labels);
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 061756e36cf8..fa89bb30d004 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -230,10 +230,6 @@ static int sh_wdt_probe(struct platform_device *pdev)
if (pdev->id != -1)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (unlikely(!res))
- return -EINVAL;
-
wdt = devm_kzalloc(&pdev->dev, sizeof(struct sh_wdt), GFP_KERNEL);
if (unlikely(!wdt))
return -ENOMEM;
@@ -249,6 +245,7 @@ static int sh_wdt_probe(struct platform_device *pdev)
wdt->clk = NULL;
}
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->base = devm_ioremap_resource(wdt->dev, res);
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 693b9d2c6e39..60deb9d304c0 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -22,9 +23,12 @@
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/reboot.h>
#include <linux/types.h>
#include <linux/watchdog.h>
+#include <asm/system_misc.h>
+
#define WDT_MAX_TIMEOUT 16
#define WDT_MIN_TIMEOUT 1
#define WDT_MODE_TIMEOUT(n) ((n) << 3)
@@ -70,6 +74,26 @@ static const int wdt_timeout_map[] = {
[16] = 0xB, /* 16s */
};
+static void __iomem *reboot_wdt_base;
+
+static void sun4i_wdt_restart(enum reboot_mode mode, const char *cmd)
+{
+ /* Enable timer and set reset bit in the watchdog */
+ writel(WDT_MODE_EN | WDT_MODE_RST_EN, reboot_wdt_base + WDT_MODE);
+
+ /*
+ * Restart the watchdog. The default (and lowest) interval
+ * value for the watchdog is 0.5s.
+ */
+ writel(WDT_CTRL_RELOAD, reboot_wdt_base + WDT_CTRL);
+
+ while (1) {
+ mdelay(5);
+ writel(WDT_MODE_EN | WDT_MODE_RST_EN,
+ reboot_wdt_base + WDT_MODE);
+ }
+}
+
static int sunxi_wdt_ping(struct watchdog_device *wdt_dev)
{
struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
@@ -181,6 +205,9 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
if (unlikely(err))
return err;
+ reboot_wdt_base = sunxi_wdt->wdt_base;
+ arm_pm_restart = sun4i_wdt_restart;
+
dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
sunxi_wdt->wdt_dev.timeout, nowayout);
@@ -191,6 +218,8 @@ static int sunxi_wdt_remove(struct platform_device *pdev)
{
struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev);
+ arm_pm_restart = NULL;
+
watchdog_unregister_device(&sunxi_wdt->wdt_dev);
watchdog_set_drvdata(&sunxi_wdt->wdt_dev, NULL);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index c919d3d5c845..5b5c5ff273fd 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -246,7 +246,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
*/
unsigned int evtchn_from_irq(unsigned irq)
{
- if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
+ if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
return 0;
return info_for_irq(irq)->evtchn;
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 84b4bfb84344..417415d738d0 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -67,10 +67,9 @@ static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
static unsigned event_array_pages __read_mostly;
/*
- * sync_set_bit() and friends must be unsigned long aligned on non-x86
- * platforms.
+ * sync_set_bit() and friends must be unsigned long aligned.
*/
-#if !defined(CONFIG_X86) && BITS_PER_LONG > 32
+#if BITS_PER_LONG > 32
#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
#define EVTCHN_FIFO_BIT(b, w) \
@@ -100,6 +99,25 @@ static unsigned evtchn_fifo_nr_channels(void)
return event_array_pages * EVENT_WORDS_PER_PAGE;
}
+static int init_control_block(int cpu,
+ struct evtchn_fifo_control_block *control_block)
+{
+ struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
+ struct evtchn_init_control init_control;
+ unsigned int i;
+
+ /* Reset the control block and the local HEADs. */
+ clear_page(control_block);
+ for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
+ q->head[i] = 0;
+
+ init_control.control_gfn = virt_to_mfn(control_block);
+ init_control.offset = 0;
+ init_control.vcpu = cpu;
+
+ return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
+}
+
static void free_unused_array_pages(void)
{
unsigned i;
@@ -312,7 +330,7 @@ static void evtchn_fifo_handle_events(unsigned cpu)
ready = xchg(&control_block->ready, 0);
while (ready) {
- q = find_first_bit(BM(&ready), EVTCHN_FIFO_MAX_QUEUES);
+ q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
consume_one_event(cpu, control_block, q, &ready);
ready |= xchg(&control_block->ready, 0);
}
@@ -324,7 +342,6 @@ static void evtchn_fifo_resume(void)
for_each_possible_cpu(cpu) {
void *control_block = per_cpu(cpu_control_block, cpu);
- struct evtchn_init_control init_control;
int ret;
if (!control_block)
@@ -341,12 +358,7 @@ static void evtchn_fifo_resume(void)
continue;
}
- init_control.control_gfn = virt_to_mfn(control_block);
- init_control.offset = 0;
- init_control.vcpu = cpu;
-
- ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control,
- &init_control);
+ ret = init_control_block(cpu, control_block);
if (ret < 0)
BUG();
}
@@ -374,30 +386,25 @@ static const struct evtchn_ops evtchn_ops_fifo = {
.resume = evtchn_fifo_resume,
};
-static int evtchn_fifo_init_control_block(unsigned cpu)
+static int evtchn_fifo_alloc_control_block(unsigned cpu)
{
- struct page *control_block = NULL;
- struct evtchn_init_control init_control;
+ void *control_block = NULL;
int ret = -ENOMEM;
- control_block = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ control_block = (void *)__get_free_page(GFP_KERNEL);
if (control_block == NULL)
goto error;
- init_control.control_gfn = virt_to_mfn(page_address(control_block));
- init_control.offset = 0;
- init_control.vcpu = cpu;
-
- ret = HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
+ ret = init_control_block(cpu, control_block);
if (ret < 0)
goto error;
- per_cpu(cpu_control_block, cpu) = page_address(control_block);
+ per_cpu(cpu_control_block, cpu) = control_block;
return 0;
error:
- __free_page(control_block);
+ free_page((unsigned long)control_block);
return ret;
}
@@ -411,7 +418,7 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
switch (action) {
case CPU_UP_PREPARE:
if (!per_cpu(cpu_control_block, cpu))
- ret = evtchn_fifo_init_control_block(cpu);
+ ret = evtchn_fifo_alloc_control_block(cpu);
break;
default:
break;
@@ -428,7 +435,7 @@ int __init xen_evtchn_fifo_init(void)
int cpu = get_cpu();
int ret;
- ret = evtchn_fifo_init_control_block(cpu);
+ ret = evtchn_fifo_alloc_control_block(cpu);
if (ret < 0)
goto out;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index eeba7544f0cd..c254ae036f18 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -69,7 +69,6 @@ struct grant_frames xen_auto_xlat_grant_frames;
static union {
struct grant_entry_v1 *v1;
- union grant_entry_v2 *v2;
void *addr;
} gnttab_shared;
@@ -120,36 +119,10 @@ struct gnttab_ops {
* by bit operations.
*/
int (*query_foreign_access)(grant_ref_t ref);
- /*
- * Grant a domain to access a range of bytes within the page referred by
- * an available grant entry. Ref parameter is reference of a grant entry
- * which will be sub-page accessed, domid is id of grantee domain, frame
- * is frame address of subpage grant, flags is grant type and flag
- * information, page_off is offset of the range of bytes, and length is
- * length of bytes to be accessed.
- */
- void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
- unsigned long frame, int flags,
- unsigned page_off, unsigned length);
- /*
- * Redirect an available grant entry on domain A to another grant
- * reference of domain B, then allow domain C to use grant reference
- * of domain B transitively. Ref parameter is an available grant entry
- * reference on domain A, domid is id of domain C which accesses grant
- * entry transitively, flags is grant type and flag information,
- * trans_domid is id of domain B whose grant entry is finally accessed
- * transitively, trans_gref is grant entry transitive reference of
- * domain B.
- */
- void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
- domid_t trans_domid, grant_ref_t trans_gref);
};
static struct gnttab_ops *gnttab_interface;
-/*This reflects status of grant entries, so act as a global value*/
-static grant_status_t *grstatus;
-
static int grant_table_version;
static int grefs_per_grant_frame;
@@ -231,7 +204,7 @@ static void put_free_entry(grant_ref_t ref)
}
/*
- * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
+ * Following applies to gnttab_update_entry_v1.
* Introducing a valid entry into the grant table:
* 1. Write ent->domid.
* 2. Write ent->frame:
@@ -250,15 +223,6 @@ static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
gnttab_shared.v1[ref].flags = flags;
}
-static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
- unsigned long frame, unsigned flags)
-{
- gnttab_shared.v2[ref].hdr.domid = domid;
- gnttab_shared.v2[ref].full_page.frame = frame;
- wmb();
- gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
-}
-
/*
* Public grant-issuing interface functions
*/
@@ -285,132 +249,11 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
-static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
- unsigned long frame, int flags,
- unsigned page_off, unsigned length)
-{
- gnttab_shared.v2[ref].sub_page.frame = frame;
- gnttab_shared.v2[ref].sub_page.page_off = page_off;
- gnttab_shared.v2[ref].sub_page.length = length;
- gnttab_shared.v2[ref].hdr.domid = domid;
- wmb();
- gnttab_shared.v2[ref].hdr.flags =
- GTF_permit_access | GTF_sub_page | flags;
-}
-
-int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
- unsigned long frame, int flags,
- unsigned page_off,
- unsigned length)
-{
- if (flags & (GTF_accept_transfer | GTF_reading |
- GTF_writing | GTF_transitive))
- return -EPERM;
-
- if (gnttab_interface->update_subpage_entry == NULL)
- return -ENOSYS;
-
- gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
- page_off, length);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);
-
-int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
- int flags, unsigned page_off,
- unsigned length)
-{
- int ref, rc;
-
- ref = get_free_entries(1);
- if (unlikely(ref < 0))
- return -ENOSPC;
-
- rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
- page_off, length);
- if (rc < 0) {
- put_free_entry(ref);
- return rc;
- }
-
- return ref;
-}
-EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);
-
-bool gnttab_subpage_grants_available(void)
-{
- return gnttab_interface->update_subpage_entry != NULL;
-}
-EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
-
-static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
- int flags, domid_t trans_domid,
- grant_ref_t trans_gref)
-{
- gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
- gnttab_shared.v2[ref].transitive.gref = trans_gref;
- gnttab_shared.v2[ref].hdr.domid = domid;
- wmb();
- gnttab_shared.v2[ref].hdr.flags =
- GTF_permit_access | GTF_transitive | flags;
-}
-
-int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
- int flags, domid_t trans_domid,
- grant_ref_t trans_gref)
-{
- if (flags & (GTF_accept_transfer | GTF_reading |
- GTF_writing | GTF_sub_page))
- return -EPERM;
-
- if (gnttab_interface->update_trans_entry == NULL)
- return -ENOSYS;
-
- gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
- trans_gref);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);
-
-int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
- domid_t trans_domid,
- grant_ref_t trans_gref)
-{
- int ref, rc;
-
- ref = get_free_entries(1);
- if (unlikely(ref < 0))
- return -ENOSPC;
-
- rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
- trans_domid, trans_gref);
- if (rc < 0) {
- put_free_entry(ref);
- return rc;
- }
-
- return ref;
-}
-EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);
-
-bool gnttab_trans_grants_available(void)
-{
- return gnttab_interface->update_trans_entry != NULL;
-}
-EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);
-
static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}
-static int gnttab_query_foreign_access_v2(grant_ref_t ref)
-{
- return grstatus[ref] & (GTF_reading|GTF_writing);
-}
-
int gnttab_query_foreign_access(grant_ref_t ref)
{
return gnttab_interface->query_foreign_access(ref);
@@ -433,29 +276,6 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
return 1;
}
-static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
-{
- gnttab_shared.v2[ref].hdr.flags = 0;
- mb();
- if (grstatus[ref] & (GTF_reading|GTF_writing)) {
- return 0;
- } else {
- /* The read of grstatus needs to have acquire
- semantics. On x86, reads already have
- that, and we just need to protect against
- compiler reorderings. On other
- architectures we may need a full
- barrier. */
-#ifdef CONFIG_X86
- barrier();
-#else
- mb();
-#endif
- }
-
- return 1;
-}
-
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
return gnttab_interface->end_foreign_access_ref(ref, readonly);
@@ -616,37 +436,6 @@ static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
return frame;
}
-static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
-{
- unsigned long frame;
- u16 flags;
- u16 *pflags;
-
- pflags = &gnttab_shared.v2[ref].hdr.flags;
-
- /*
- * If a transfer is not even yet started, try to reclaim the grant
- * reference and return failure (== 0).
- */
- while (!((flags = *pflags) & GTF_transfer_committed)) {
- if (sync_cmpxchg(pflags, flags, 0) == flags)
- return 0;
- cpu_relax();
- }
-
- /* If a transfer is in progress then wait until it is completed. */
- while (!(flags & GTF_transfer_completed)) {
- flags = *pflags;
- cpu_relax();
- }
-
- rmb(); /* Read the frame number /after/ reading completion status. */
- frame = gnttab_shared.v2[ref].full_page.frame;
- BUG_ON(frame == 0);
-
- return frame;
-}
-
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
return gnttab_interface->end_foreign_transfer_ref(ref);
@@ -962,12 +751,6 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
-static unsigned nr_status_frames(unsigned nr_grant_frames)
-{
- BUG_ON(grefs_per_grant_frame == 0);
- return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
-}
-
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
int rc;
@@ -985,55 +768,6 @@ static void gnttab_unmap_frames_v1(void)
arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
-static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
-{
- uint64_t *sframes;
- unsigned int nr_sframes;
- struct gnttab_get_status_frames getframes;
- int rc;
-
- nr_sframes = nr_status_frames(nr_gframes);
-
- /* No need for kzalloc as it is initialized in following hypercall
- * GNTTABOP_get_status_frames.
- */
- sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC);
- if (!sframes)
- return -ENOMEM;
-
- getframes.dom = DOMID_SELF;
- getframes.nr_frames = nr_sframes;
- set_xen_guest_handle(getframes.frame_list, sframes);
-
- rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
- &getframes, 1);
- if (rc == -ENOSYS) {
- kfree(sframes);
- return -ENOSYS;
- }
-
- BUG_ON(rc || getframes.status);
-
- rc = arch_gnttab_map_status(sframes, nr_sframes,
- nr_status_frames(gnttab_max_grant_frames()),
- &grstatus);
- BUG_ON(rc);
- kfree(sframes);
-
- rc = arch_gnttab_map_shared(frames, nr_gframes,
- gnttab_max_grant_frames(),
- &gnttab_shared.addr);
- BUG_ON(rc);
-
- return 0;
-}
-
-static void gnttab_unmap_frames_v2(void)
-{
- arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
- arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
-}
-
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
@@ -1101,43 +835,13 @@ static struct gnttab_ops gnttab_v1_ops = {
.query_foreign_access = gnttab_query_foreign_access_v1,
};
-static struct gnttab_ops gnttab_v2_ops = {
- .map_frames = gnttab_map_frames_v2,
- .unmap_frames = gnttab_unmap_frames_v2,
- .update_entry = gnttab_update_entry_v2,
- .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
- .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
- .query_foreign_access = gnttab_query_foreign_access_v2,
- .update_subpage_entry = gnttab_update_subpage_entry_v2,
- .update_trans_entry = gnttab_update_trans_entry_v2,
-};
-
static void gnttab_request_version(void)
{
- int rc;
- struct gnttab_set_version gsv;
+ /* Only version 1 is used, which will always be available. */
+ grant_table_version = 1;
+ grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
+ gnttab_interface = &gnttab_v1_ops;
- gsv.version = 1;
-
- rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
- if (rc == 0 && gsv.version == 2) {
- grant_table_version = 2;
- grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
- gnttab_interface = &gnttab_v2_ops;
- } else if (grant_table_version == 2) {
- /*
- * If we've already used version 2 features,
- * but then suddenly discover that they're not
- * available (e.g. migrating to an older
- * version of Xen), almost unbounded badness
- * can happen.
- */
- panic("we need grant tables version 2, but only version 1 is available");
- } else {
- grant_table_version = 1;
- grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
- gnttab_interface = &gnttab_v1_ops;
- }
pr_info("Grant tables using version %d layout\n", grant_table_version);
}
@@ -1225,8 +929,7 @@ int gnttab_init(void)
}
}
- ret = arch_gnttab_init(max_nr_grant_frames,
- nr_status_frames(max_nr_grant_frames));
+ ret = arch_gnttab_init(max_nr_grant_frames);
if (ret < 0)
goto ini_nomem;
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 4a7e6e0a5f4c..c214daab4829 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -174,6 +174,7 @@ static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
"version mismatch (%s/%s) with pcifront - "
"halting " DRV_NAME,
magic, XEN_PCI_MAGIC);
+ err = -EFAULT;
goto out;
}